diff --git "a/6334.jsonl" "b/6334.jsonl" new file mode 100644--- /dev/null +++ "b/6334.jsonl" @@ -0,0 +1,1603 @@ +{"seq_id":"11588253041","text":"import os\nfrom flask import Flask, jsonify, request\nfrom faker import Factory\nfrom twilio.access_token import AccessToken, ConversationsGrant\n\napp = Flask(__name__)\nfake = Factory.create()\n\n@app.route('/')\ndef index():\n return app.send_static_file('index.html')\n\n@app.route('/token')\ndef token():\n # get credentials for environment variables\n account_sid = os.environ['TWILIO_ACCOUNT_SID']\n api_key = os.environ['TWILIO_API_KEY']\n api_secret = os.environ['TWILIO_API_SECRET']\n\n # Create an Access Token\n token = AccessToken(account_sid, api_key, api_secret)\n\n # Set the Identity of this token\n token.identity = fake.user_name()\n\n # Grant access to Conversations\n grant = ConversationsGrant()\n grant.configuration_profile_sid = os.environ['TWILIO_CONFIGURATION_SID']\n token.add_grant(grant)\n\n # Return token info as JSON\n return jsonify(identity=token.identity, token=token.to_jwt())\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"mattmakai/video-calls-python-swift","sub_path":"video-quickstart-python-master/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"} +{"seq_id":"4678283261","text":"import discord, asyncio, requests, re, time, os, sys, json;from re import search;from discord.ext import commands;from discord.ext.commands import has_permissions, MissingPermissions\r\nimport subprocess\r\nimport random\r\nimport json\r\nfrom itertools import cycle\r\nfrom exchanges.bitfinex import Bitfinex\r\n\r\nwith open('config.json', 'r+', encoding='utf-8') as f:\r\n config = json.load(f)\r\n\r\nbot = commands.Bot(command_prefix='$')\r\n\r\nbot.remove_command('help')\r\n@bot.event\r\nasync def on_ready():\r\n os.system('title Bot running.')\r\n print('Bot started / Running.')\r\n while True:\r\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"$help | BTC: ${Bitfinex().get_current_price()}\"))\r\n await asyncio.sleep(60)\r\n\r\ndisplayoptions = [\"If you'd like to contribute to the bot's development (not required), feel free to send any necessary amount to 17rpaAv4XXDLeTLP6kzMKxd3d3zqdkCpgD\", \" Invite this discord bot to your server! 
https://discord.com/oauth2/authorize?client_id=806580500986593282&scope=bot\"]\r\ndef checkConfirmations(txid, proxy=None):\r\n if proxy == None:\r\n getconv = requests.get(f'https://api.blockcypher.com/v1/btc/main/txs/{txid}?limit=50&includeHex=true')\r\n if getconv.status_code == 200:\r\n if getconv.json()['double_spend'] == True:\r\n return \"DoubleSpent\"\r\n else:\r\n return getconv.json()['confirmations']\r\n else:\r\n return checkConfirmations(txid)\r\n\r\ndef blockcypheraccelerate(rawtxid):\r\n data = {\r\n 'tx': rawtxid\r\n }\r\n r = requests.post(' https://api.blockcypher.com/v1/bcy/test/txs/push', data=data)\r\n if r.status_code == 200:\r\n return True\r\n else:\r\n return False\r\ndef smartbitaccelerate(rawtxid):\r\n data = {\r\n 'hex': rawtxid\r\n }\r\n r = requests.post('https://api.smartbit.com.au/v1/blockchain/pushtx', data=data)\r\n if r.status_code == 200:\r\n return True\r\n else:\r\n return False\r\n\r\ndef coinbinaccelerate(rawtxid):\r\n params = {\r\n 'uid': 1,\r\n 'key': 12345678901234567890123456789012,\r\n 'setmodule': 'bitcoin',\r\n 'request': 'sendrawtransaction'\r\n }\r\n data = {\r\n 'rawtx': rawtxid\r\n }\r\n r = requests.get(f'https://coinb.in/api/?uid=1&key=12345678901234567890123456789012&setmodule=bitcoin&request=sendrawtransaction', params=params, data=data)\r\n if r.status_code == 200:\r\n return True\r\n else:\r\n return False\r\n\r\n@bot.command()\r\nasync def check(ctx, txid=None, confcheck=None):\r\n if txid != None:\r\n try:\r\n if confcheck == None:\r\n confcheck = 1\r\n \r\n currrentconf = checkConfirmations(txid)\r\n if currrentconf != 'DoubleSpent':\r\n if int(checkConfirmations(txid)) >= int(confcheck):\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention}, your transaction ``{txid}`` has already hit ``{confcheck}`` confirmations. The transaction is currently on ``{checkConfirmations(txid)}`` confirmation(s).',\r\n color=0xd43b33\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n await ctx.send(embed=embed)\r\n else:\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention}, monitoring your transaction ``{txid}`` on the bitcoin network for ``{confcheck}`` confirmations. The transaction is currently on ``{checkConfirmations(txid)}`` confirmations.',\r\n color=0x5CDBF0\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n message = await ctx.send(embed=embed)\r\n\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention}, monitoring your transaction ``{txid}`` on the bitcoin network for ``{confcheck}`` confirmations. 
The transaction is currently on ``{checkConfirmations(txid)}`` confirmations.\\n**Your transaction was successfully accelerated on smartbit, coinbin, and blockcypher!** ✅',\r\n color=0x38f232\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n\r\n boosttxid = requests.get(f'https://blockstream.info/api/tx/{txid}/hex').text\r\n coinbinaccelerate(boosttxid)\r\n smartbitaccelerate(boosttxid)\r\n blockcypheraccelerate(boosttxid)\r\n await message.edit(embed=embed)\r\n while True:\r\n await asyncio.sleep(30)\r\n currrentconf = checkConfirmations(txid)\r\n if currrentconf != 'DoubleSpent':\r\n if int(currrentconf) >= int(confcheck):\r\n await ctx.send(f'{ctx.author.mention}, your transaction ``{txid}`` has successfully hit ``{confcheck}`` confirmations.')\r\n await ctx.author.send(f'{ctx.author.mention}, your transaction ``{txid}`` has successfully hit ``{confcheck}`` confirmations.')\r\n break\r\n else:\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention} **WARNING** your transaction ``{txid}`` was maliciously labeled as doublespent on the senders\\' side. If you are undergoing a deal, please stay cautious and know that the bitcoin delivered will be rolled back to the sender.',\r\n color=0xd43b33\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n message = await ctx.send(embed=embed)\r\n message = await ctx.author.send(embed=embed)\r\n else:\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention} **WARNING** your transaction ``{txid}`` was maliciously labeled as doublespent on the senders\\' side. If you are undergoing a deal, please stay cautious and know that the bitcoin delivered will be rolled back to the sender.',\r\n color=0xd43b33\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n message = await ctx.send(embed=embed)\r\n await ctx.author.send(embed=embed)\r\n\r\n except discord.ext.commands.errors.MissingRequiredArgument:\r\n await ctx.send(f'{ctx.author.mention}, a required arguement is missing when using this command. Please retry the command by running ``!check (txid) (confirmations)``')\r\n else: \r\n await ctx.author.send('The required bitcoin network transaction ID is missing when using this command. 
Please retry the command by running ``!check (txid) (confirmations)``')\r\n\r\n@bot.command()\r\nasync def invite(ctx):\r\n await ctx.send(f'{ctx.author.mention}, invite the ``Crypto Checker`` discord bot to your discord server by using the following link: https://discord.com/oauth2/authorize?client_id=806580500986593282&scope=bot')\r\n\r\n@bot.command()\r\nasync def help(ctx):\r\n pass\r\n\r\nbot.run(config['token'])\r\n","repo_name":"acierp/CryptoChecker","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"} +{"seq_id":"24300819058","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 8 21:38:07 2020\r\n\r\n@author: Navneet Yadav\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\n#%% image print function\r\ndef imagepr(img):\r\n cv2.imshow('image', img) \r\n k = cv2.waitKey(0) & 0xFF\r\n \r\n # # wait for 's' key to save and exit\r\n if k == ord('s'): \r\n cv2.imwrite('copy.png',img) \r\n cv2.destroyAllWindows() \r\n # any key to exit \r\n else :\r\n cv2.destroyAllWindows()\r\n#%% resize function\r\ndef resize(img,w):\r\n h_org, w_org = img.shape[:2]\r\n # Calculating the ratio \r\n ratio = w / w_org\r\n # Creating a tuple containing width and height \r\n dim = (w, int(h_org * ratio)) \r\n # Resizing the image \r\n return cv2.resize(img, dim)\r\n#%%\r\ndef canny(image, sigma=0.33):\r\n\t# compute the median of the single channel pixel intensities\r\n\tv = np.median(image)\r\n\t# apply automatic Canny edge detection using the computed median\r\n\tlower = int(max(0, (1.0 - sigma) * v))\r\n\tupper = int(min(255, (1.0 + sigma) * v))\r\n\tedged = cv2.Canny(image, lower, upper)\r\n\t# return the edged image\r\n\treturn edged\r\n# %% taking pic\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3,1280)\r\ncap.set(4,720)\r\nwhile(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret==True:\r\n # Display the resulting frame\r\n cv2.imshow('frame', frame)\r\n if cv2.waitKey(1) & 0xFF == ord('s'):\r\n cv2.imwrite(\"NewPicture.jpg\",frame)\r\n break\r\n else:\r\n break\r\n# Release everything if job is finished\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n#%%image preprocessing\r\nimg = cv2.imread(\"rgpv_smart_card.jpg\")\r\nimg=resize(img, 400)\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nblurred = cv2.GaussianBlur(gray, (3, 3), 0)\r\nedges = cv2.Canny(gray, 75, 200)\r\n\r\n#morphological dilation\r\nkernel = np.ones((5,5),np.uint8)\r\ndilation = cv2.dilate(edges,kernel,iterations = 1)\r\nimagepr(edges)\r\n#%% finding contours \r\ncontours, hierarchy = cv2.findContours(edges.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\ncnts = sorted(contours, key = cv2.contourArea, reverse = True)[:5]\r\n# loop over the contours\r\nfor c in cnts:\r\n\t# approximate the contour\r\n\tperi = cv2.arcLength(c, True)\r\n\tapprox = cv2.approxPolyDP(c, 0.02 * peri, True)\r\n\t# if our approximated contour has four points, then we\r\n\t# can assume that we have found our screen\r\n\tif len(approx) == 4:\r\n\t\tpCnt = approx\r\n\t\tbreak\r\n#%%\r\n#cv2.drawContours(img, [pCnt], -1, (0, 255, 0), 2)\r\n#imagepr(img)\r\n#%%\r\nx, y, width, height = cv2.boundingRect(pCnt)\r\nroi = img[y:y+height, x:x+width]\r\ncv2.imwrite(\"result_doc.png\", roi)","repo_name":"navneet05/doc_scanner_opencv","sub_path":"doc_scanner.py","file_name":"doc_scanner.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"319533740","text":"\n\"\"\"Mock handlers for the collector datastore backends.\"\"\"\n\nfrom typing import List, Dict, Set, Optional, Iterable\n\nfrom .. import ConfigEntity\nfrom ..abc_backend import (\n AbcDataStoreBackend,\n Entity,\n NamespaceTemplateEntity,\n as_namespace_template_entity,\n ServiceIdConfigEntity,\n as_service_id_config_entity,\n GatewayConfigEntity,\n as_gateway_config_entity,\n ServiceColorTemplateEntity,\n as_service_color_template_entity,\n TemplateEntity,\n)\nfrom .....protect import RouteProtection\n\n\nclass MockBackend(AbcDataStoreBackend):\n \"\"\"Back-end data store mock object.\"\"\"\n active_versions: Set[str]\n namespace_entities: Dict[NamespaceTemplateEntity, str]\n service_color_entities: Dict[ServiceColorTemplateEntity, str]\n gateway_entities: Dict[GatewayConfigEntity, str]\n service_id_entities: Dict[ServiceIdConfigEntity, str]\n committed: Optional[str]\n\n def __init__(self):\n self.active_versions = set()\n self.namespace_entities = {}\n self.service_color_entities = {}\n self.gateway_entities = {}\n self.service_id_entities = {}\n self.committed = None\n\n def get_active_version(self, activity: str) -> str:\n assert activity not in self.active_versions\n self.active_versions.add(activity)\n return activity\n\n def start_changes(self, activity: str) -> str:\n assert activity not in self.active_versions\n self.active_versions.add(activity)\n return activity\n\n def commit_changes(self, version: str) -> None:\n assert version in self.active_versions\n self.committed = version\n\n def download(self, version: str, entity: Entity) -> str:\n assert version in self.active_versions\n sct = as_service_color_template_entity(entity)\n if sct:\n return self.service_color_entities[sct]\n nte = as_namespace_template_entity(entity)\n if nte:\n return self.namespace_entities[nte]\n sic = as_service_id_config_entity(entity)\n if sic:\n return self.service_id_entities[sic]\n gwc = as_gateway_config_entity(entity)\n assert gwc\n return self.gateway_entities[gwc]\n\n def upload(self, version: str, entity: Entity, contents: str) -> None:\n assert version in self.active_versions\n sct = as_service_color_template_entity(entity)\n if sct:\n self.service_color_entities[sct] = contents\n return\n nte = as_namespace_template_entity(entity)\n if nte:\n self.namespace_entities[nte] = contents\n return\n sic = as_service_id_config_entity(entity)\n if sic:\n self.service_id_entities[sic] = contents\n return\n gwc = as_gateway_config_entity(entity)\n assert gwc\n self.gateway_entities[gwc] = contents\n\n def rollback_changes(self, version: str) -> None:\n self.active_versions.remove(version)\n self.gateway_entities = {}\n self.service_id_entities = {}\n self.service_color_entities = {}\n self.namespace_entities = {}\n\n def get_template_entities(self, version: str) -> Iterable[TemplateEntity]:\n ret: List[TemplateEntity] = list(self.service_color_entities.keys())\n ret.extend(self.namespace_entities.keys())\n return ret\n\n def get_config_entities(self, version: str) -> Iterable[ConfigEntity]:\n ret: List[ConfigEntity] = list(self.service_id_entities.keys())\n ret.extend(self.gateway_entities.keys())\n return ret\n\n def get_namespace_template_entities(\n self, version: str, namespace: Optional[str] = None,\n protection: Optional[RouteProtection] = None, purpose: Optional[str] = None,\n ) -> Iterable[NamespaceTemplateEntity]:\n for entity in self.namespace_entities:\n if (\n (namespace is None or namespace == entity.namespace)\n and (protection is None or protection 
== entity.protection)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n\n def get_gateway_config_entities(\n self, version: str, namespace: Optional[str] = None,\n protection: Optional[RouteProtection] = None, purpose: Optional[str] = None,\n ) -> Iterable[GatewayConfigEntity]:\n for entity in self.gateway_entities:\n if (\n (namespace is None or namespace == entity.namespace_id)\n and (protection is None or protection == entity.protection)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n\n def get_service_color_template_entities(\n self, version: str, namespace: Optional[str] = None,\n service: Optional[str] = None, color: Optional[str] = None,\n purpose: Optional[str] = None,\n ) -> Iterable[ServiceColorTemplateEntity]:\n for entity in self.service_color_entities:\n if (\n (namespace is None or namespace == entity.namespace)\n and (service is None or service == entity.service)\n and (color is None or color == entity.color)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n\n def get_service_id_config_entities(\n self, version: str, namespace_id: Optional[str] = None,\n service_id: Optional[str] = None, service: Optional[str] = None,\n color: Optional[str] = None, purpose: Optional[str] = None,\n ) -> Iterable[ServiceIdConfigEntity]:\n for entity in self.service_id_entities:\n if (\n (namespace_id is None or namespace_id == entity.namespace_id)\n and (service_id is None or service_id == entity.service_id)\n and (service is None or service == entity.service)\n and (color is None or color == entity.color)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n","repo_name":"groboclown/nightjar-mesh","sub_path":"old-stuff-for-reference/nightjar-base/nightjar-src/python-src/nightjar/backend/api/data_store/tests/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"14017925530","text":"# IMPORT MODULES\nimport hashlib\nimport json\nfrom time import time\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\nimport requests\nfrom flask import Flask, jsonify, request\n\n# CREATE BLOCKCHAIN CLASS\nclass Blockchain:\n def __init__(self):\n self.current_transactions = []\n self.chain = []\n self.nodes = set() # for idempotency\n\n # CREATE GENESIS BLOCK\n self.new_block(previous_hash=1, proof=100)\n\n # NEW BLOCK METHOD (create new block and add to the chain)\n def new_block(self, proof, previous_hash):\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1])\n }\n\n # reset current transactions list\n self.current_transactions = []\n self.chain.append(block)\n\n return block\n\n # NEW TRANSACTION METHOD (create new transaction for next mined block)\n def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n\n return self.last_block['index'] + 1\n\n # PROPERTY DECORATOR TO RETURN THE LAST BLOCK\n @property\n def last_block(self):\n return self.chain[-1]\n\n # HASH METHOD (create SHA-256 hash of a block)\n # TURN HASH METHOD INTO STATIC METHOD (cannot modify class or object)\n @staticmethod\n def hash(block):\n # must have an ORDERED dictionary\n block_string = json.dumps(block, sort_keys=True).encode()\n\n return 
hashlib.sha256(block_string).hexdigest()\n\n # PROOF OF WORK (POW) METHOD (where check condition is hash(pp') has 4 leading 0s)\n def proof_of_work(self, last_proof):\n proof = 0\n while self.valid_proof(last_proof, proof) is False:\n proof += 1\n\n return proof\n\n # TURN VALID PROOF METHOD INTO STATIC METHOD (validates if hash has 4 leading 0s)\n @staticmethod\n def valid_proof(last_proof, proof):\n guess = f'{last_proof}{proof}'.encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n\n return guess_hash[:4] == '0000'\n\n # REGISTERING NODE METHOD (add new ndoe to list of nodes)\n def register_node(self, address):\n parsed_url = urlparse(address)\n\n if parsed_url.netloc:\n self.nodes.add(parsed_url.netloc)\n elif parsed_url.path:\n # accepts url without a 'xxx.xxx.x.x:xxxx' scheme\n self.nodes.add(parsed_url.path)\n else:\n raise ValueError('INVALID URL')\n\n '''\n CODE BELOW IS THE CONSENSUS ALGORITHM - ADDRESSES CONFLICTS IN THE BLOCKCHAIN OVER DIFFERENT NODES BY FINDING THE LONGEST CHAIN AND REPLACING WITH THAT CHAIN\n '''\n\n # VALID CHAIN CHECK METHOD\n def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n-----------\\n\")\n\n # check if previous hash is correct\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # check POW\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n\n last_block = block\n current_index += 1\n\n return True\n\n # CONFLICT RESOLUTION METHOD\n def resolve_conflicts(self):\n neighbors = self.nodes\n new_chain = None\n\n max_length = len(self.chain)\n # grab chains from all other nodes and check their length\n for node in neighbors:\n response = requests.get(f'http://{node}/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # compare length with current node's chain and check validity of chain\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n # replace with new longest chain\n if new_chain:\n self.chain = new_chain\n return True\n\n return False\n\n'''\nCODE BELOW IS FOR SETTING UP API TO MAKE HTTP REQUESTS TO THE BLOCKCHAIN\n'''\n\n# INSTANTIATE NODE\napp = Flask(__name__)\n\n# GENERATE GLOBALLY UNIQUE ADDRESS FOR NODE\nnode_id = str(uuid4()).replace('-','')\n\n# INSTANTIATE BLOCKCHAIN\nblockchain = Blockchain()\n\n'''\nBELOW ARE ALL THE ROUTES NEEDED TO CREATE AND MANAGE THE BLOCKCHAIN\n'''\n# MINE A NEW BLOCK\n@app.route('/mine', methods=['GET'])\ndef mine():\n # run POW algo\n last_block = blockchain.last_block\n last_proof = last_block['proof']\n proof = blockchain.proof_of_work(last_proof)\n\n # reward the sender for the mine; sender is '0' to signify that this node has mined a new coin\n blockchain.new_transaction(\n sender=\"0\",\n recipient=node_id,\n amount=1\n )\n\n # add new block to the chain\n previous_hash = blockchain.hash(last_block)\n block = blockchain.new_block(proof, previous_hash)\n\n response = {\n 'message': 'New Block Forged',\n 'index': block['index'],\n 'transactions': block['transactions'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash']\n }\n\n return jsonify(response), 200\n\n# MAKE A NEW TRANSACTION\n@app.route('/transaction/new', methods=['POST'])\ndef new_transaction():\n values = request.get_json()\n print(values)\n\n # check required fields\n required = ['sender', 'recipient', 'amount']\n 
if not all(k in values for k in required):\n return 'MISSING VALUES', 400\n\n # create new transaction\n index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])\n response = {'message': f'Transaction will be added to Block {index}'}\n\n return jsonify(response), 201\n\n# GET THE FULL CHAIN AND LENGTH\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)\n }\n\n return jsonify(response), 200\n\n# REGISTER A NEW NODE\n@app.route('/nodes/register', methods=['POST'])\ndef register_nodes():\n values = request.get_json()\n\n nodes = values.get('nodes')\n if nodes is None:\n return \"ERROR: PLEASE SUPPLY A VALID LIST OF NODES\", 400\n\n for node in nodes:\n blockchain.register_node(node)\n\n response = {\n 'message': 'New nodes have been added',\n 'total_nodes': list(blockchain.nodes)\n }\n\n return jsonify(response), 201\n\n# RESOLVE NODES\n@app.route('/nodes/resolve', methods=['GET'])\ndef consensus():\n replaced = blockchain.resolve_conflicts()\n\n if replaced:\n response= {\n 'message': 'Our chain was replaced/updated',\n 'new_chain': blockchain.chain\n }\n else:\n response = {\n 'message': 'Our chain is authoritative',\n 'chain': blockchain.chain\n }\n\n return jsonify(response), 200\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=5000)\n","repo_name":"sophia2798/blockchain","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73339890808","text":"\"\"\"\n\n## Stream Producer\nLambda function that will query all metrics for a given namespace\nand send metric data for the last 60 seconds to a kinesis stream\n\n\"\"\"\nimport os\nimport json\nfrom typing import List\nfrom datetime import timedelta, datetime, timezone\nimport dateutil.parser\nfrom enum import Enum\nimport itertools\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom dataquality.stream import MetricStream\nfrom definitions.definition import Definition\n\nCW_CLIENT = boto3.client('cloudwatch')\nKINESIS_CLIENT = boto3.client('kinesis')\n\nclass StreamName(Enum):\n KINESIS_MINUTE_STREAM_NAME: str = 'minute'\n KINESIS_HOUR_STREAM_NAME: str = 'hour'\n KINESIS_DAY_STREAM_NAME: str = 'day'\n\ndef main(\n event: dict,\n context: dict\n) -> None:\n \"\"\"Lambda Handler.\"\"\"\n\n account_number = context.invoked_function_arn.split(\":\")[4]\n definition = Definition(account=account_number)\n dataset_stream = MetricStream(metric_sets=definition.metric_sets)\n\n end_time_exact = datetime.utcnow()\n end_time = end_time_exact - timedelta(minutes=end_time_exact.minute % 10,\n seconds=end_time_exact.second,\n microseconds=end_time_exact.microsecond)\n\n md_queries = dataset_stream.metric_data_queries(\n frequency=event['frequency']\n )\n\n if len(md_queries) <= 0:\n print(f\"No metrics matched for {event['frequency']} frequency.\")\n return False\n else:\n print(\"Matched metrics:\")\n print(md_queries)\n\n # Group metrics by Period\n grouped_dict = {}\n for i in md_queries:\n k = i[\"MetricStat\"][\"Period\"]\n if (grouped_dict.get(k) == None):\n grouped_dict[k]=[i]\n else:\n grouped_dict[k].append(i)\n\n # For each Period in grouped_dict get metric data and append to metrics_data list\n metrics_data = []\n\n for period, md in grouped_dict.items():\n start_time = end_time - timedelta(seconds=period)\n period_metrics_data = get_metric_data(\n 
metric_data_queries=md,\n start_time=start_time,\n end_time=end_time\n )\n for metric_object in period_metrics_data:\n metrics_data.append(metric_object)\n print(metric_object)\n\n put_metrics(\n metrics_data=metrics_data,\n time=end_time,\n event=event,\n context=context,\n metric_sets=dataset_stream.metrics\n )\n\ndef get_metric_data(metric_data_queries, start_time, end_time):\n \"\"\"Paginate and return all metric data under namspace.\"\"\"\n metric_data_results = []\n paginator = CW_CLIENT.get_paginator('get_metric_data')\n page_iterator = paginator.paginate(\n MetricDataQueries=metric_data_queries,\n StartTime=start_time,\n EndTime=end_time\n )\n for page in page_iterator:\n metric_data_results += page['MetricDataResults']\n return metric_data_results\n\ndef translate_metrics_to_records(metrics_data: List[dict], time: datetime, event: dict, context: dict, metric_sets):\n \"\"\"Translate CW metrics list to Kinesis stream records.\"\"\"\n records = []\n metadata_map = {}\n dimensions_map = {}\n\n for metric_object in metrics_data:\n for metric in metric_sets:\n if metric.unique_id() == metric_object['Id']:\n metric_object['Namespace'] = metric.namespace\n metric_object['Name'] = metric.name\n metric_object['Period'] = metric.period\n metric_object['Statistic'] = metric.statistic\n if metric.metadata:\n for meta in metric.metadata:\n metadata_map[meta.name] = meta.value\n metric_object['Metadata'] = metadata_map\n if metric.dimensions:\n for dimension in metric.dimensions:\n dimensions_map[dimension.name] = dimension.value\n metric_object['Dimensions'] = dimensions_map\n else:\n continue\n\n metric_object['CollectionTime'] = time.replace(tzinfo=timezone.utc).isoformat()\n metric_object['AccountId'] = context.invoked_function_arn.split(\":\")[4]\n metric_object['Region'] = context.invoked_function_arn.split(\":\")[3]\n metric_object['MetricTimestamp'] = metric_object['Timestamps'][0] if len(metric_object['Timestamps']) > 0 else None\n metric_object['MetricValue'] = metric_object['Values'][0] if len(metric_object['Values']) > 0 else None\n metric_object['Frequency'] = event['frequency']\n records.append({\n 'Data': json.dumps(metric_object, default=str),\n 'PartitionKey': 'default'\n })\n print(records)\n return records\n\ndef put_metrics(metrics_data: List[dict], time: datetime, event: dict, context: dict, metric_sets):\n \"\"\"Put records to kinesis stream\"\"\"\n try:\n KINESIS_CLIENT.put_records(\n Records=translate_metrics_to_records(\n metrics_data=metrics_data,\n time=time,\n event=event,\n context=context,\n metric_sets=metric_sets\n ),\n StreamName=os.environ[StreamName(event['frequency']).name]\n )\n except ClientError as ex:\n raise ex\n\ndef frequency_to_period(frequency: str) -> int:\n \"\"\" Convert rate string to period in seconds.\"\"\"\n if frequency == \"day\":\n period = 86400\n if frequency == \"minute\":\n period = 60\n if frequency == \"hour\":\n period = 3600\n return period\n","repo_name":"awslabs/aws-dataset-ingestion-metrics-collection-framework","sub_path":"lambda/metric_stream_producer.py","file_name":"metric_stream_producer.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"} +{"seq_id":"34038245148","text":"###########################################\r\n######### Enrico Ubaldino ##############\r\n###########################################\r\n\r\nimport cv2\r\n\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n \r\n#Creo variabile per avvio cattura 
frame. 0 prima webcam, 1 la seconda\r\nlive_Camera = cv2.VideoCapture(0)\r\n\r\n \r\n\r\n #lower_bound = np.array([11,33,111])\r\nlower_bound = np.array([18,50,50])\r\nlower_bound = np.array(lower_bound, dtype=\"uint8\")\r\n\r\n#upper_bound = np.array([90,255,255])\r\nupper_bound = np.array([35,255,255])\r\nupper_bound = np.array(upper_bound, dtype=\"uint8\")\r\n\r\n \r\n\r\nwhile(live_Camera.isOpened()):\r\n\r\n ret, frame = live_Camera.read()\r\n\r\n frame = cv2.resize(frame,(1280,720))\r\n\r\n frame = cv2.flip(frame,1)\r\n\r\n \r\n\r\n frame_smooth = cv2.GaussianBlur(frame,(7,7),0)\r\n cv2.imshow(\"Image Gauss\",frame_smooth)\r\n \r\n frame_hsv = cv2.cvtColor(frame_smooth,cv2.COLOR_BGR2HSV)\r\n cv2.imshow(\"Image HSV\",frame_hsv)\r\n \r\n mask = cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n \r\n\r\n output = cv2.bitwise_and(frame,frame_hsv, mask)\r\n cv2.imshow(\"Image processing\",output)\r\n image_binary = output\r\n\r\n \r\n\r\n check_if_fire_detected = cv2.countNonZero(mask)\r\n\r\n \r\n\r\n if int(check_if_fire_detected) >= 20000 :\r\n\r\n cv2.putText(frame,\"Fire Detected !\",(300,60),cv2.FONT_HERSHEY_COMPLEX,3,(0,0,255),2)\r\n print('Fire detected')\r\n \r\n\r\n \r\n\r\n cv2.imshow(\"Fire Detection\",frame)\r\n \r\n\r\n \r\n\r\n if cv2.waitKey(10) == 27 :\r\n\r\n break\r\n\r\n \r\n\r\nlive_Camera.release()\r\n\r\ncv2.destroyAllWindows()\r\n","repo_name":"Cyberg96/UnioneElettronica","sub_path":"FireDetection_UE.py","file_name":"FireDetection_UE.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36825391924","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (c) 2018 Leland Stanford Junior University\r\n# Copyright (c) 2018 The Regents of the University of California\r\n#\r\n# This file is part of the SimCenter Backend Applications\r\n#\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n#\r\n# 1. Redistributions of source code must retain the above copyright notice,\r\n# this list of conditions and the following disclaimer.\r\n#\r\n# 2. Redistributions in binary form must reproduce the above copyright notice,\r\n# this list of conditions and the following disclaimer in the documentation\r\n# and/or other materials provided with the distribution.\r\n#\r\n# 3. Neither the name of the copyright holder nor the names of its contributors\r\n# may be used to endorse or promote products derived from this software without\r\n# specific prior written permission.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\r\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\r\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\r\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\r\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\r\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\r\n# POSSIBILITY OF SUCH DAMAGE.\r\n#\r\n# You should have received a copy of the BSD 3-Clause License along with\r\n# this file. 
If not, see .\r\n#\r\n# Contributors:\r\n# Kuanshi Zhong\r\n#\r\n\r\nimport os\r\nimport subprocess\r\nimport json\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom FetchOpenSHA import *\r\n\r\n\r\ndef create_earthquake_scenarios(scenario_info, stations, input_dir=[], output_dir=[], output_format='SimCenterEvent'):\r\n\r\n # Number of scenarios\r\n source_num = scenario_info.get('Number', 1)\r\n if source_num == 'All':\r\n # Large number to consider all sources in the ERF\r\n source_num = 10000000\r\n # Directly defining earthquake ruptures\r\n if scenario_info['Generator'] == 'Simulation':\r\n # TODO:\r\n print('Physics-based earthquake simulation is under development.')\r\n return 1\r\n # Searching earthquake ruptures that fulfill the request\r\n elif scenario_info['Generator'] == 'Selection':\r\n # Collecting all possible earthquake scenarios\r\n lat = []\r\n lon = []\r\n for s in stations['Stations']:\r\n lat.append(s['Latitude'])\r\n lon.append(s['Longitude'])\r\n # Reference location\r\n lat = np.mean(lat)\r\n lon = np.mean(lon)\r\n ref_station = [lat, lon]\r\n # Getting earthquake rupture forecast data\r\n source_type = scenario_info['EqRupture']['Type']\r\n if source_type == 'ERF':\r\n source_model = scenario_info['EqRupture']['Model']\r\n source_name = scenario_info['EqRupture'].get('Name', None)\r\n min_M = scenario_info['EqRupture'].get('min_Mag', 5.0)\r\n max_M = scenario_info['EqRupture'].get('max_Mag', 9.0)\r\n max_R = scenario_info['EqRupture'].get('max_Dist', 1000.0)\r\n eq_source = getERF(source_model, True)\r\n erf_data = export_to_json(eq_source, ref_station, outfile = None, \\\r\n EqName = source_name, minMag = min_M, \\\r\n maxMag = max_M, maxDistance = max_R, \\\r\n maxSources = np.max([500, source_num]))\r\n # Parsing data\r\n feat = erf_data['features']\r\n tag = []\r\n for i, cur_f in enumerate(feat):\r\n if source_name and (source_name not in cur_f['properties']['Name']):\r\n continue\r\n if min_M > cur_f['properties']['Magnitude']:\r\n continue\r\n tag.append(i)\r\n # Abstracting desired ruptures\r\n s_tag = random.sample(tag, min(source_num, len(tag)))\r\n erf_data['features'] = list(feat[i] for i in s_tag)\r\n scenario_data = dict()\r\n for i, rup in enumerate(erf_data['features']):\r\n scenario_data.update({i: {\r\n 'Type': source_type,\r\n 'RuptureForecast': source_model,\r\n 'SourceIndex': rup['properties']['Source'],\r\n 'RuptureIndex': rup['properties']['Rupture']\r\n }})\r\n # Cleaning tmp outputs\r\n del erf_data\r\n elif source_type == 'PointSource':\r\n scenario_data = dict()\r\n try:\r\n magnitude = scenario_info['EqRupture']['Magnitude']\r\n location = scenario_info['EqRupture']['Location']\r\n average_rake = scenario_info['EqRupture']['AverageRake']\r\n average_dip = scenario_info['EqRupture']['AverageDip']\r\n scenario_data.update({0: {\r\n 'Type': source_type,\r\n 'Magnitude': magnitude,\r\n 'Location': location,\r\n 'AverageRake': average_rake,\r\n 'AverageDip': average_dip\r\n }})\r\n except:\r\n print('Please check point-source inputs.')\r\n # Simulating the earthquake\r\n elif scenario_info['Generator'] == 'ShakerMaker':\r\n from imm.ShakerMakerSimulation import ShakerMakerModel\r\n # initialization\r\n sm_model = ShakerMakerModel(scenario_info['SimuConfig'], stations['Stations'], input_dir)\r\n # configuration\r\n sm_model.model_configuration()\r\n scenario_data = sm_model\r\n sm_model.run_simulation(output_dir, output_format)\r\n\r\n # return\r\n return scenario_data\r\n\r\n\r\ndef create_wind_scenarios(scenario_info, 
event_info, stations, data_dir):\r\n\r\n # Number of scenarios\r\n source_num = scenario_info.get('Number', 1)\r\n # Directly defining earthquake ruptures\r\n if scenario_info['Generator'] == 'Simulation':\r\n # Collecting site locations\r\n lat = []\r\n lon = []\r\n for s in stations['Stations']:\r\n lat.append(s['Latitude'])\r\n lon.append(s['Longitude'])\r\n # Station list\r\n station_list = {\r\n 'Latitude': lat,\r\n 'Longitude': lon\r\n }\r\n # Track data\r\n try:\r\n track_file = scenario_info['Storm'].get('Track')\r\n df = pd.read_csv(os.path.join(data_dir, track_file), header = None, index_col = None)\r\n track = {\r\n 'Latitude': df.iloc[:, 0].values.tolist(),\r\n 'Longitude': df.iloc[:, 1].values.tolist()\r\n }\r\n except:\r\n print('CreateScenario: no storm track provided or file format not accepted.')\r\n # Save Lat_w.csv\r\n track_simu_file = scenario_info['Storm'].get('TrackSimu', None)\r\n if track_simu_file: \r\n df = pd.read_csv(os.path.join(data_dir, track_simu_file), header = None, index_col = None)\r\n track_simu = df.iloc[:, 0].values.tolist()\r\n else:\r\n track_simu = track['Latitude']\r\n # Reading Terrain info (if provided)\r\n terrain_file = scenario_info.get('Terrain', None)\r\n if terrain_file:\r\n with open(os.path.join(data_dir, terrain_file)) as f:\r\n terrain_data = json.load(f)\r\n else:\r\n terrain_data = []\r\n # Parsing storm properties\r\n param = []\r\n param.append(scenario_info['Storm']['Landfall']['Latitude'])\r\n param.append(scenario_info['Storm']['Landfall']['Longitude'])\r\n param.append(scenario_info['Storm']['LandingAngle'])\r\n param.append(scenario_info['Storm']['Pressure'])\r\n param.append(scenario_info['Storm']['Speed'])\r\n param.append(scenario_info['Storm']['Radius'])\r\n # Monte-Carlo\r\n #del_par = [0, 0, 0] # default\r\n # Parsing mesh configurations\r\n mesh_info = [1000., scenario_info['Mesh']['DivRad'], 1000000.]\r\n mesh_info.extend([0., scenario_info['Mesh']['DivDeg'], 360.])\r\n # Wind speed measuring height\r\n measure_height = event_info['IntensityMeasure']['MeasureHeight']\r\n # Saving results\r\n scenario_data = dict()\r\n for i in range(source_num):\r\n scenario_data.update({i: {\r\n 'Type': 'Wind',\r\n 'CycloneParam': param,\r\n 'StormTrack': track,\r\n 'StormMesh': mesh_info,\r\n 'Terrain': terrain_data,\r\n 'TrackSimu': track_simu,\r\n 'StationList': station_list,\r\n 'MeasureHeight': measure_height\r\n }})\r\n # return\r\n return scenario_data\r\n else:\r\n print('Currently only supporting Simulation generator.')\r\n","repo_name":"kuanshi/HazardSimulation","sub_path":"CreateScenario.py","file_name":"CreateScenario.py","file_ext":"py","file_size_in_byte":9159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"41487245267","text":"def breadth_first_search(graph, node):\n if node not in graph: return\n\n queue = [node]\n visited = [node]\n\n while len(queue) > 0:\n node = queue.pop(0)\n print(node, end = \" \")\n\n for neighbor in graph[node]:\n if neighbor not in visited:\n visited.append(neighbor)\n queue.append(neighbor)\n\n\n#key is node, list is neighbors\nsample_graph = {\n 0: [1, 2, 3, 5],\n 1: [2, 3, 5],\n 2: [4, 5],\n 3: [4],\n 4: [5],\n 5: []\n}\n\nbreadth_first_search(sample_graph, 
0)","repo_name":"FOSS-UCSC/FOSSALGO","sub_path":"algorithms/gr-bfsrh/python3/breadth_first_search.py","file_name":"breadth_first_search.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"77"} +{"seq_id":"40967219846","text":"import pandas as pd\nfrom techminer.core import explode\n\n\ndef keywords_coverage(data, column, keywords_list):\n\n data = data[[column, \"ID\"]].dropna()\n num_documents = len(data)\n x = pd.DataFrame({column: keywords_list})\n x[\"Cum Coverage (Cum Num Documents)\"] = 0\n\n data[column] = data[column].map(lambda w: w.split(\";\"))\n data[\"SELECTED\"] = False\n keywords_list = [\" \".join(keyword.split(\" \")[:-1]) for keyword in keywords_list]\n x.index = keywords_list\n for keyword in keywords_list:\n data[\"SELECTED\"] = data.SELECTED | data[column].map(lambda w: keyword in w)\n selected = data[data.SELECTED][[\"ID\"]].drop_duplicates()\n x.loc[keyword, \"Cum Coverage (Cum Num Documents)\"] = len(selected)\n\n x[\"Cum Coverage (%)\"] = x[\"Cum Coverage (Cum Num Documents)\"].map(\n lambda w: str(round(100 * w / num_documents, 1)) + \" %\"\n )\n x = x.reset_index()\n x.pop(\"index\")\n return x\n","repo_name":"jdvelasq/techminer","sub_path":"src/core/keywords_coverage.py","file_name":"keywords_coverage.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"10678621473","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport sampler as randc\n\n############################################################################\n# RANDOM COUNTERS\n############################################################################\nbins = 17\nrc = randc.Sampler(np.ones(bins))\nrc.update(.3, 6)\nrc.update(.3, 7)\nrc.update(.5, 1)\nrc.update(.1, bins - 1)\nsampling = []\nfor i in range(10000):\n sampling.append(rc.sample())\nprint(rc.score_tree)\nplt.hist(sampling, bins=bins)\nplt.show()\n","repo_name":"remilepriol/sdca4crf","sub_path":"test/random_counters_test.py","file_name":"random_counters_test.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"34510334883","text":"\"\"\"\nWrite a Python program that accept a positive number and subtract from this number the sum of its digits and so on. 
Continues this operation until the number is positive.\"\"\"\n\nnum = int(input(\"Enter a number\"))\ndigits = 0\nnum1 = num\nwhile (num != 0):\n digi = num % 10\n digits += digi\n num = num // 10\n# print(digits)\nwhile num1 > 0:\n num1 = num1 - digits\n print(num1)\n","repo_name":"shraddhaa43/Python-Interview-Solutions-Repository","sub_path":"Challenge Zone Medium to Hard Level/Q23.py","file_name":"Q23.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7212013091","text":"import sys\nimport os\n\npath_dir = os.getcwd()\nsys.path.insert(0, \"..\")\n#print(sys.path)\n\n\n\"\"\"Selenium Packages\"\"\"\nimport argparse\n\nfrom selenium.common.exceptions import ElementClickInterceptedException, NoSuchElementException, \\\n ElementNotInteractableException\nfrom selenium.webdriver import Keys\nfrom selenium.webdriver.support import expected_conditions\n\"\"\" \nPage Object classes \nCreated By: Prankur Garg\nDate: 25th May 2022\n\"\"\"\nfrom Cumulative_Reporting.BaseClass import BaseClass\nfrom PageObjectAdiosPlus.AdiosPlusDespatch import AdiosPlusDespatch\nfrom PageObjectAdiosPlus.AdiosPlusHome import AdiosPlusHome\nfrom PageObjectAdiosPlus.AdiosPlusResourcesPage import AdiosPlusResourcesPage\n\n\n\"\"\"Python Package\"\"\"\nimport time\nimport datetime\nimport logging\n\n\nclass AdiosPlusMain(BaseClass):\n def __init__(self):\n self._current_time = datetime.datetime.now()\n self._current_time = self._current_time.strftime(\"%Y%m%d\")\n self._log_name = \"Adios_run_\" + self._current_time + \".log\"\n self._logger = logging.getLogger(__name__)\n self._logger.setLevel(logging.INFO)\n self._handler = logging.FileHandler(self._log_name)\n self._logger.addHandler(self._handler)\n self._formatter = logging.Formatter('%(asctime)s-%(message)s')\n self._handler.setFormatter(self._formatter)\n\n def parse_commandline_args(self):\n parser = argparse.ArgumentParser(description=\"Need to enter test cycle, qual name and team name\")\n parser.add_argument('--test_cycle', type=str, help=\"Cumulative epack test cycle\", required=True)\n parser.add_argument('--qual', type=str, help=\"e.g. Cumulative_Epack_\", required=True)\n parser.add_argument('--team', type = str, help = \"Enter Team Name core/data-services\", required= True)\n if len(sys.argv[1:]) < 6 or '--test_cycle' not in sys.argv[1:] or '--qual' not in sys.argv[1:] or '--team' not in sys.argv[1:]:\n self.driver.close()\n self._logger.error(\"Either parameters were not provided or wrongly given\")\n parse_args = parser.parse_args()\n sys.exit(\"Required Parameters were not provided\")\n teams = [\"core\", \"data-services\"]\n parse_args = parser.parse_args()\n self._test_cycle = parse_args.test_cycle\n self._qual = parse_args.qual\n self._team = parse_args.team\n if self._team not in teams:\n self._logger.error(\"Team name entered is wrong. It should be either core or data-sustaining\")\n sys.exit(\"Team name entered is wrong. 
It should be either core or data-sustaining\")\n self._logger.info(f\"User has entered Test Cycle name as {self._test_cycle} and qual Name as {self._qual}\")\n\n\n def adios_plus_login(self):\n self.driver.get(\"http://adiosplus.cec.lab.emc.com/dashboard\")\n self._home_adios_plus = AdiosPlusHome(self.driver)\n user_name = self._home_adios_plus.get_user_name()\n user_name.send_keys(\"gargp6\")\n user_password = self._home_adios_plus.get_user_password()\n user_password.send_keys(\"\")\n submit_button = self._home_adios_plus.submit_button()\n submit_button.click()\n self._logger.info(\"User has entered username and password and logging into Adios Page\")\n\n def navigate_resources_page(self):\n element = self._home_adios_plus.navigate_resources()\n self.driver.execute_script(\"arguments[0].click();\", element)\n self._logger.info(\"We are resource tab of Adios Page\")\n resource_page = AdiosPlusResourcesPage(self.driver)\n time.sleep(30)\n print(AdiosPlusResourcesPage.search_plus)\n\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusResourcesPage.search_plus)))\n\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusResourcesPage.search_plus)))\n\n data_services_suites = {\n \"Cumulative_Testing_P2\": \"10.60.153.253\",\n \"Cumulative_Testing_P3\": \"10.60.153.195\",\n \"Defrag_Shrink_SP_Cumulative\": \"10.60.154.6\",\n \"LREP_SP_Cumulative\" : \"10.60.154.51\",\n \"RDF_SP_Cumulative\" : \"10.60.155.123\",\n \"Cumulative_Testing_P1\" : \"10.60.153.202\",\n \"Long_Running_Tests_SP_Cumulative\": \"10.60.153.132\",\n \"Cumulative_Testing_P4\": \"10.60.153.195\"\n }\n\n core_suites = {\n \"ACS DataMobility\" : \"10.60.153.55\",\n \"ACT Platform\" : \"10.60.153.55\",\n \"ACT Config\" : \"10.60.153.55\",\n \"ACT Backend\" : \"10.60.153.55\",\n \"ACS_Enginuity Services\" : \"10.60.153.55\"\n }\n self._suites = {}\n if self._team == \"core\":\n self._suites = core_suites\n elif self._team == \"data-services\":\n self._suites = data_services_suites\n\n self._dict_value = [value for value in self._suites.values()]\n self._dict_keys = [value for value in self._suites.keys()]\n print(self._dict_value)\n print(self._dict_keys)\n for host in self._dict_value:\n try:\n search = resource_page.search_host()\n self.driver.execute_script(\"arguments[0].click()\", search)\n #search.click()\n except ElementClickInterceptedException:\n search = resource_page.search_host()\n search.click()\n time.sleep(2)\n self._logger.info(f\"We are adding {host} in resource tab of Adios Page\")\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusResourcesPage.host_name)))\n try:\n host_name = resource_page.enter_host_name()\n host_name.send_keys(host)\n except NoSuchElementException as ex:\n self.driver.get_screenshot_as_file(\"Host_error.png\")\n host_name = resource_page.enter_host_name()\n host_name.send_keys(host)\n\n confirm = resource_page.confirm_button()\n confirm.click()\n time.sleep(2)\n self._logger.info(f\"Successfully added {host} in resource tab of Adios Page\")\n\n def navigate_dispatch(self):\n element = self._home_adios_plus.navigate_despatch()\n self.driver.execute_script(\"arguments[0].click();\", element)\n self._logger.info(f\" Now when we have resources added. 
Will add suites to Dispatch tab\")\n\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.search)))\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.search)))\n\n despatch_page = AdiosPlusDespatch(self.driver)\n search_button = despatch_page.search_button()\n search_button.click()\n\n handles = self.driver.window_handles\n self.driver.switch_to.window(handles[0])\n self._logger.info(\"Navigated to dispatch page of Adios Page\")\n for suite in self._dict_keys:\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.clear_filter)))\n clear_button = despatch_page.clear_filter_button()\n clear_button.click()\n\n suite_name = despatch_page.enter_suite_name()\n suite_name.send_keys(suite)\n\n search_suite = despatch_page.search_suite_button()\n search_suite.click()\n\n self._logger.info(f\"Adding suite {suite} to adios Page\")\n\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.select_suite)))\n time.sleep(4)\n\n select_suite = despatch_page.select_suite_checkbox()\n select_suite[1].click()\n\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.right_arrow)))\n\n right_arrow = despatch_page.select_right_arrow()\n right_arrow.click()\n\n select_suites = despatch_page.select_all_suites()\n select_suites[2].click()\n\n add_suites = despatch_page.add_suites_button()\n add_suites.click()\n\n self._logger.info(f\"Successfully added all suites to View\")\n\n for key, value in self._suites.items():\n suite_search = despatch_page.search_suite_to_run()\n self._logger.info(f'Selected suite {key} to run on host {value}')\n suite_search.send_keys(key)\n\n try:\n add_button = despatch_page.suite_add_button()\n except ElementClickInterceptedException as ex:\n add_button = despatch_page.suite_add_button()\n add_button.click()\n\n try:\n suite_run = despatch_page.select_suite_to_despatch()\n suite_run.click()\n except ElementClickInterceptedException as ex:\n suite_run = despatch_page.select_suite_to_despatch()\n suite_run.click()\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.test_cycle)))\n\n test_cycle = despatch_page.test_cycle_input()\n\n test_cycle.send_keys(self._test_cycle)\n\n test_cycle.send_keys(Keys.ENTER)\n time.sleep(1)\n qual = despatch_page.qual_input()\n\n qual.send_keys(self._qual)\n\n qual.send_keys(Keys.ENTER)\n time.sleep(2)\n\n host = despatch_page.host_input()\n host.send_keys(value)\n host.send_keys(Keys.ENTER)\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.host_select_message)))\n\n message = despatch_page.get_host_message()\n message = message.text\n self._logger.info(f' Currently status of host is {message}')\n if \"Currently validating\" in message:\n time.sleep(5)\n message = despatch_page.get_host_message()\n message = message.text\n if \"ready to run\" in message:\n self._logger.info(f'Despatcher state of Host {value} is ready')\n time.sleep(8)\n box = despatch_page.select_symm()\n box.send_keys(\"OLKCK\")\n box.send_keys(Keys.ENTER)\n time.sleep(2)\n\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.run)))\n run_button = despatch_page.run_button()\n run_button.click()\n\n handles = self.driver.window_handles\n self.driver.switch_to.window(handles[0])\n time.sleep(5)\n ok_button = despatch_page.ok_button_function()\n ok_button.click()\n time.sleep(20)\n 
self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.after_run_message)))\n\n self.driver.get_screenshot_as_file(f'{key}.png')\n time.sleep(30)\n\n search_field = despatch_page.search_suite_text_field()\n\n clear_suite = despatch_page.clear_search_suite()\n clear_suite.click()\n\n despatch_page.deselect_suite_link().click()\n time.sleep(4)\n\n else:\n self._logger.info(f'Despatcher state of Host {value} is not in ready state')\n print(\"host is not in ready state\")\n clear_suite = despatch_page.clear_search_suite()\n clear_suite.click()\n print(\"Clearing suite\")\n time.sleep(5)\n\n deselect_suite = despatch_page.deselect_suite_link()\n deselect_suite.click()\n time.sleep(4)\n continue\n self.driver.close()\n\n\nobj = AdiosPlusMain()\nobj.parse_commandline_args()\nobj.adios_plus_login()\nobj.navigate_resources_page()\nobj.navigate_dispatch()\n\n\n\n\n\n","repo_name":"gargprankur/Selenium_Projects","sub_path":"PageObjectAdiosPlus/AdiosPlusMain.py","file_name":"AdiosPlusMain.py","file_ext":"py","file_size_in_byte":12006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11707904240","text":"import redis\nimport logging\nimport time\n\n\nSOCKET_TIMEOUT = 15000\nCONNECTION_MAX_ATTEMPTS = 5\n\n\ndef retry(times):\n def decorator(func):\n def wrapper(*args, **kwargs):\n attempt = 0\n while attempt < times:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n logging.exception(\"Retry of %s faild: %s\" % (func.__name__, e))\n attempt += 1\n time.sleep(1)\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\nclass Store(object):\n def __init__(self, host, port, socket_timeout=SOCKET_TIMEOUT):\n try:\n self.r = redis.Redis(host=host, port=port, socket_timeout=socket_timeout)\n except Exception as e:\n logging.exception(\"Can't init store: %s\" % e)\n\n @retry(CONNECTION_MAX_ATTEMPTS)\n def get(self, key):\n return self.r.get(key)\n\n @retry(CONNECTION_MAX_ATTEMPTS)\n def set(self, key, value, time):\n self.r.set(key, value, ex=time)\n\n def cache_get(self, key):\n try:\n return self.get(key)\n except Exception as e:\n logging.exception(\"Can't get from cache: %s\" % e)\n return None\n\n def cache_set(self, key, value, time):\n try:\n self.set(key, value, time)\n except Exception as e:\n logging.exception(\"Can't set to cache: %s\" % e)\n","repo_name":"tatiana-vakhrameeva/python-course","sub_path":"hw3-1/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5329222844","text":"# Read in assignment input and store in string\nwith open('day3_input') as f:\n contents = f.read()\n\n# Split input lines into array\ninputArray = contents.splitlines()\npriority = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\nprioritySum = 0\n\n# Itterate through the list, split the input string in half and find the character that appears in each half\n# After identifing the common character, determine it's priority value based on the priority string above \n# the characters priority is its position in the priority string starting with a in position 1\n# Add all of the priorites together\nfor i in inputArray:\n half = int(len(i)/2)\n compartmentOne = i[0:half]\n compartmentTwo = i[half:len(i)]\n sameItem = ''\n for i in compartmentOne:\n position = -1\n position = compartmentTwo.find(i)\n if position != -1:\n sameItem = compartmentTwo[position]\n prioritySum += 
priority.find(sameItem)+1\n\n# Print the sum of all priorities\nprint(prioritySum)","repo_name":"davidlukemt/advent-of-code","sub_path":"2022/day3/day3_part1.py","file_name":"day3_part1.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71032475768","text":"import copy\n\nimport torch\nimport torch.nn as nn\n\n\nclass WrapLayer(nn.Module):\n def __init__(self, layer, max_seq_len):\n super().__init__()\n self.layer = layer\n self.position_ids = nn.Parameter(\n torch.arange(max_seq_len).unsqueeze(0).view(-1, max_seq_len),\n requires_grad=False,\n )\n\n def forward(self, x):\n _, seq_len, h = x.shape\n return self.layer(x, position_ids=self.position_ids[:, :seq_len])[0]\n\n\nclass CastOutputToFloat(nn.Sequential):\n def forward(self, x):\n return super().forward(x).to(torch.float32)\n\n\nclass AdapterModel(nn.Module):\n def __init__(\n self,\n pretrained_model,\n max_seq_len,\n adapter_size,\n adapter_dropout=0.1,\n init_with_existing_tokens=False,\n ): # add cast to 8bit param\n super().__init__()\n # Copy token embedding layer from the pretrained model\n vocab_size = pretrained_model.config.vocab_size\n hidden_size = pretrained_model.config.hidden_size\n self.token_emb = nn.Embedding(vocab_size, hidden_size)\n self.token_emb.weight = copy.deepcopy(pretrained_model.gpt_neox.embed_in.weight)\n self.token_emb.weight.requires_grad = False\n pretrained_model.gpt_neox.embed_in = None\n\n # Initialize adapter from normal distribution\n self.adapter = nn.Parameter(\n torch.randn(adapter_size, hidden_size) * 0.002, requires_grad=True\n )\n self.adapter_dropout = nn.Dropout(adapter_dropout)\n\n # Freeze pretrained layers\n for param in pretrained_model.parameters():\n param.requires_grad = False\n if param.ndim == 1:\n # cast the small parameters (e.g. 
layernorm) to fp32 for stability\n param.data = param.data.to(torch.float32)\n\n # Make transformer layers sequential\n self.transformer_layers = nn.Sequential(\n *[\n WrapLayer(layer, max_seq_len)\n for layer in pretrained_model.gpt_neox.layers\n ]\n )\n\n # Output layers -- cast final output to fp32\n self.out = nn.Sequential(\n pretrained_model.gpt_neox.final_layer_norm,\n CastOutputToFloat(pretrained_model.embed_out),\n )\n\n def forward(self, x):\n bsz, _ = x.shape\n token_emb = self.token_emb(x) # bsz, inp_len, embed_dim\n adapter_emb = self.adapter_dropout(\n self.adapter.unsqueeze(0).repeat(bsz, 1, 1)\n ) # bsz, adapter_size, embed_dim\n seq = torch.cat([adapter_emb, token_emb], dim=1)\n seq = torch.utils.checkpoint.checkpoint_sequential(\n self.transformer_layers, 3, seq\n )\n return self.out(seq)\n","repo_name":"andersonbcdefg/instruct-pythia-ptuning","sub_path":"adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"24060419146","text":"#%%\n# Dependencies and Setup\nimport requests\nimport gmaps\n\n# Import API key\nfrom config import g_key\n\n# %%\n# Set the parameters to search for a hotel in Paris.\nparams = {\n \"radius\": 5000,\n \"types\": \"lodging\",\n \"key\": g_key,\n \"location\": \"48.8566, 2.3522\"}\n# Use base URL to search for hotels in Paris.\nbase_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n# Make request and get the JSON data from the search.\nhotels = requests.get(base_url, params=params).json()\n\n# %%\n# Iterate through the DataFrame.\nfor index, row in hotel_df.iterrows():\n # Get the latitude and longitude.\n lat = row[\"Lat\"]\n lng = row[\"Lng\"]\n\n # Add the latitude and longitude to the params dictionary as values to the location key.\n params[\"location\"] = f\"{lat},{lng}\"\n\n # Use the search term: \"lodging\" and our latitude and longitude.\n base_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n # Make request and get the JSON data from the search.\n hotels = requests.get(base_url, params=params).json()\n # Grab the first hotel from the results and store the name.\n hotel_df.loc[index, \"Hotel Name\"] = hotels[\"results\"][0][\"name\"]\n\n# %%\n","repo_name":"ebskii52/World_Weather_Analysis","sub_path":"Google_Nearby_Search.py","file_name":"Google_Nearby_Search.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39242225005","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 19 23:13:41 2020\n\n@author: Admin\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n#%%\n# Create a class for a block \nclass block(nn.Module):\n def __init__(self, in_channels, out_channels, identity_downsample = None, expansion = 1, stride = 1):\n super(block, self).__init__()\n self.expanded_outchannels = out_channels * expansion # Borrowing the idea of expansion factor from Resnet50-101-152\n \n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size = 3, stride = stride, padding = 1)\n self.bn1 = nn.BatchNorm2d(out_channels)\n \n \n self.conv2 = nn.Conv2d(out_channels, self.expanded_outchannels, kernel_size = 3, stride = 1, padding = 1)\n self.bn2 = nn.BatchNorm2d(self.expanded_outchannels)\n \n self.identity_downsample = identity_downsample\n \n def forward(self, x):\n identity = x\n x = F.relu(self.bn1(self.conv1(x)))\n x = 
F.relu(self.bn2(self.conv2(x)))\n if self.identity_downsample is not None:\n identity = self.identity_downsample(identity)\n # print(\"identity.shape = \", identity.shape)\n # print(\"x.shape = \", x.shape)\n x += identity\n x = F.relu(x)\n return x\nclass ResNetCIFAR(nn.Module):\n def __init__(self,image_channels, num_classes, expansion, num_blocks_per_layer = 2):\n super(ResNetCIFAR, self).__init__()\n self.in_channels = 16 # meaning this is the first number of channels to upsample to from image_channels\n self.expansion = expansion\n self.num_blocks_per_layer = 2\n \n self.conv1 = nn.Conv2d(image_channels, 16, kernel_size = 3, stride = 1, padding = 1)\n self.bn1 = nn.BatchNorm2d(16) # \n \n # Resnet layers\n self.layerconv2 = self._make_layer(block, out_channels = 16, stride = 1)\n self.layerconv3 = self._make_layer(block, out_channels = 32, stride = 2)\n self.layerconv4 = self._make_layer(block, out_channels = 64, stride = 2)\n \n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n self.fc = nn.Linear(64 * self.expansion, num_classes)\n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n # print(\"Output shape after conv1: \", x.shape)\n x = self.layerconv2(x)\n x = self.layerconv3(x)\n x = self.layerconv4(x)\n \n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n \n return x\n def _make_layer(self, block, out_channels, stride):\n layers = []\n # Because any time this function _make_layer is called, this resulting layer will downsample the input.\n # Hence identity_downsample is always needed\n identity_downsample = nn.Sequential(\n nn.Conv2d(self.in_channels, out_channels * self.expansion, kernel_size = 1,\n stride = stride),\n nn.BatchNorm2d(out_channels * self.expansion)\n )\n # Creating first block for this layer\n layers.append(block(in_channels = self.in_channels, out_channels = out_channels, identity_downsample = identity_downsample, \\\n expansion = self.expansion, stride = stride))\n \n self.in_channels = out_channels * self.expansion\n \n # Creating subsequent blocks for this layer\n # For subsequent blocks, the input dimensions match the output dimensions, so no identity_downsample is needed,\n # meaning only perform simple addition of the input and the output\n for i in range(self.num_blocks_per_layer - 1):\n layers.append(block(self.in_channels, out_channels, expansion = self.expansion))\n \n return nn.Sequential(*layers)\n \n# resnet = ResNetCIFAR(image_channels = 3, num_classes = 100, expansion = 3, num_blocks_per_layer = 2)\n# x = torch.randn(3, 3, 32, 32)\n# y = resnet(x)","repo_name":"giaphattram/ResNet-CIFAR100-ImageClassification","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35172967962","text":"#Name: Min Jung\r\n#Due Date:12/05/2021\r\n#Final Project: Lab 2\r\n\r\nclass Triangle:\r\n def __init__(self, base1, height1, base2, height2):\r\n self.base1 =base1\r\n self.height1= height1\r\n self.base2= base2\r\n self.height2 = height2\r\n def compare_area(self):\r\n self.area1 = 0.5 * self.base1 * self.height1\r\n self.area2 = 0.5 * self.base2 * self.height2\r\n if self.area1 > self.area2:\r\n print(f'Base:{self.base1:.2f}')\r\n print(f'Height:{self.height1:.2f}')\r\n print(f'Area:{self.area1:.2f}')\r\n elif self.area1 == self.area2:\r\n print(\"Both Triangles have the same area!\")\r\n print(f'Triangle 1 Area: {self.area1:.2f}')\r\n print(f'Triangle 2 Area: {self.area2:.2f}')\r\n else:\r\n 
print(f'Base:{self.base2:.2f}')\r\n print(f'Height:{self.height2:.2f}')\r\n print(f'Area:{self.area2:.2f}')\r\ndef main():\r\n base1= float(input())\r\n height1= float(input())\r\n base2= float(input())\r\n height2= float(input())\r\n \r\n area = Triangle(base1,height1,base2,height2)\r\n print(\"\\nTriangle with larger area:\")\r\n area.compare_area()\r\nmain()","repo_name":"minjung1004/CS-2520","sub_path":"Final Project/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43171645302","text":"#!/usr/bin/env python3\n\n'''\n 字段:学生姓名 班级 Linux PHP Python\n stu1 = {'id':1,'sname':'tom','bj':'1','Linux':100,}\n'''\n\nstu_info = []\nid = 0\n\ndef menu():\n print('''\n | ----------学生成绩系统--------|\n | |\n | ==========主功能菜单==========|\n | |\n | | \n | 1.录入学生成绩 |\n | 2.查询学生成绩 |\n | 3.删除学生的成绩 |\n | 4.修改学生的成绩 |\n | 5.展示所有学生成绩 |\n | 0.退出系统 |\n | |\n |-------------------------------|\n\n\n\n\n\n ''')\n\n#录入学生成绩\ndef add_info():\n while True:\n sname = input('输入学生的姓名:')\n if not sname:\n print('学生姓名不能为空')\n continue\n bj = input('输入学生的班级:')\n Linux = input('输入Linux成绩:')\n PHP = input('输入PHP成绩:')\n Python = input('输入Python成绩:')\n\n global id \n id += 1\n stu = {'id':id,'sname':sname,'bj':bj,'Linux':Linux,'PHP':PHP,'Python':Python}\n stu_info.append(stu)\n\n print(stu_info)\n key = input('是否继续录入y/n?')\n if key == 'y':\n continue\n else:\n break\n\n\n#显示所有学生的成绩\ndef show():\n '''\n 遍历列表,获取到每个学生的信息\n '''\n format_title = '{:^6}{:^12}\\t{:^12}{:^12}{:^12}{:^12}'\n format_data = '{:^6}{:^13}\\t{:^15}{:^13}{:^15}{:^14}'\n print(format_title.format('ID','姓名','班级','Linux成绩','PHP成绩','Python成绩'))\n\n for i in stu_info:\n id = i.get('id')\n sname = i.get('sname')\n bj = i.get('bj')\n Linux = i.get('Linux')\n PHP = i.get('PHP')\n Python = i.get('Python')\n print(format_data.format(id,sname,bj,Linux,PHP,Python))\n\ndef search():\n '''\n 根据名字查询学生的成绩\n\n '''\n \n format_title = '{:^6}{:^12}\\t{:^12}{:^12}{:^12}{:^12}'\n format_data = '{:^6}{:^13}\\t{:^15}{:^13}{:^15}{:^14}'\n sname = input('输入要查询学生的姓名:')\n\n print(format_title.format('ID','姓名','班级','Linux成绩','PHP成绩','Python成绩'))\n \n #提取到所有学生的名字\n name_list = [stu_info[i].get('sname') for i in range(len(stu_info))]\n if sname in name_list:\n for i in stu_info:\n if sname == i.get('sname'):\n id = i.get('id')\n sname = i.get('sanme')\n bj = i.get('bj')\n Linux = i.get('Linux')\n PHP = i.get('PHP')\n Python = i.get('Python')\n print(id,sname,bj,Linux,PHP,Python)\n else:\n print('学生名字不存在')\n\n\ndef delete():\n global id\n sname = input('请输入要删除学生得名字:')\n\n if stu_info:\n for i in stu_info:\n if i['sname'] == sname:\n stu_info.remove(i) #删除该字段\n print('删除成功')\n #修改剩下学生的id id - 1\n #for i,v in enumerate(stu_info):\n for i in range(len(stu_info)):\n id = i + 1\n stu_info[i]['id'] = id\n if not stu_info:\n id = 0\n show()\n\n#修改学生信息,只修改学生的成绩\ndef modify():\n sname = input()\n if stu_info: \n for i in stu_info:\n if key == '1':\n Linux = input('')\n i['Linux'] = Linux\n else:\n print('学生不存在')\n\n\ndef main():\n while True: \n menu()\n key = input('请选择功能:')\n if key == '1':\n add_info()\n if key == '2':\n search()\n if key == '3':\n delete()\n if key == '5':\n show()\n\n if key == '0':\n break\n\n\nmain()\n","repo_name":"sunyuanheng/000404","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} 
+{"seq_id":"38932665298","text":"from django.http import HttpResponse\nimport datetime\nfrom django.template import Template , Context\nfrom django.template.loader import get_template\nfrom django.shortcuts import render\n\n\nclass Persona(object):\n def __init__(self,nombre,apellido,edad,direccion):\n self.nombre = nombre\n self.apellido = apellido\n self.edad = edad\n self.direccion = direccion\n\n\n\n\ndef index(request):\n\n documento = ('',\n '',\n '',\n '',\n 'MI PRIMERA PAGINA WEB DJANGO',\n '',\n '

Hola esta es mi primera pagina web con django

',\n '

BIENVENIDOS

',\n '',\n '')\n\n return HttpResponse(documento)\n\n\ndef saludo(request):\n\n\n Datos = Persona(\"CAMILA\",\"LEAL\",34,\"GUACARA\")\n\n temas = ['administracion','matematicas','ingles','idiomas','php']\n\n #variables\n nombre = 'anderson'\n apellido = 'garcia'\n fecha = datetime.datetime.now()\n\n #-------------------------------------------------------------------------------------------------------------------#\n # doc_externo = open(\"D:\\PROYECTOS DJANGO\\proyecto1\\proyecto1\\Templates\\miPlantilla.html\")\n\n #creamos objeto tipo template\n\n # plt = Template(doc_externo.read())\n\n #cerramos el doc_externo\n\n # doc_externo.close()\n\n #creamos contexto\n\n # ctx = Context({\"nombre_persona\":Datos.nombre,\"apellido_persona\":Datos.apellido,\"fecha\":fecha,\"temas\":temas})\n\n #renderizamos\n\n # documento = ctx.render(ctx)\n # -----------------------------------------------------------------------------------------------------------------#\n\n\n\n ####### Cargar templates con Settings #######\n\n # TEMPLATES = get_template('miPlantilla.html')\n\n # DOCUMENTO_TEMPLATES = TEMPLATES.render({\"nombre_persona\":Datos.nombre,\"apellido_persona\":Datos.apellido,\"fecha\":fecha,\"temas\":temas})\n\n # return HttpResponse(DOCUMENTO_TEMPLATES)\n\n ####### Cargar templates con Shortcut Render #######\n\n return render(request,\"miPlantilla.html\",{\"nombre_persona\":Datos.nombre,\"apellido_persona\":Datos.apellido,\"fecha\":fecha,\"temas\":temas})\n\n\ndef damefecha(request):\n fecha = datetime.datetime.now()\n documento = '''\n \n \n \n MI PRIMERA PAGINA WEB DJANGO\n \n

Hola esta es la fecha actual: %s

\n

BIENVENIDOS

\n '\n ''' %fecha\n\n return HttpResponse(documento)\n\n\ndef calculaedad(request , ano):\n\n edadactual = 38\n periodo = ano - 2022\n\n edadfutura = edadactual+periodo\n\n documento ='''\n \n \n \n MI PRIMERA PAGINA WEB DJANGO\n \n

En el año: %s tendras %s años

\n

BIENVENIDOS

\n '\n ''' %(ano,edadfutura)\n\n return HttpResponse(documento)\n\n\ndef calculaedadparametros(request , edad, agno):\n\n periodo = agno - 2022\n\n edadfutura = edad+periodo\n\n documento ='''\n \n \n \n MI PRIMERA PAGINA WEB DJANGO\n \n

En el año: %s tendras %s años

\n

BIENVENIDOS

\n '\n ''' %(agno,edadfutura)\n\n return HttpResponse(documento)\n\n\n\n\ndef cursoC(request):\n fecha = datetime.datetime.now()\n\n return render(request,\"CursoC.html\", {\"fecha\":fecha})\n\ndef cursoCss(request):\n\n return render(request,\"Cursocss.html\")\n\n\n\n","repo_name":"ARSYSTEMAS/CURSO-DJANGO","sub_path":"proyecto1/proyecto1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7278418357","text":"import random\r\nnum=random.randint(1,10)\r\ncondition=True\r\nwhile condition:\r\n guess=int(input('請輸入猜的數字(介於0~10之間)'))\r\n if(guess<0 or guess>10):\r\n print('請輸入猜的數字(介於0~10之間)')\r\n elif(num==guess):\r\n print('你猜對了')\r\n condition=False\r\n else:\r\n print('你猜錯了')","repo_name":"charlie14516/AE402-python","sub_path":"猜數字_2.py","file_name":"猜數字_2.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14706245419","text":"\n#https://likegeeks.com/python-gui-examples-tkinter-tutorial/\n\nfrom tkinter import *\n\nwindow = Tk()\n\nwindow.title(\"Some guji\")\n\nwindow.geometry('350x200')\n\nlbl = Label(window,text = \"Heloo there\")#, font = (\"Arial Bold\", 50))\n\nlbl.grid(column=0,row=0)\n\ntxt = Entry(window, width = 10, state = 'normal') # disabled, readonly\ntxt.grid(column=1, row=0)\ntxt.focus()\n\ndef clicked():\n res = \"Welcome to \" + txt.get()\n lbl.configure(text = res)\n\nbtn = Button(window, text= \"Clikc me\", bg = \"green\", fg = \"grey\", command = clicked)\nbtn.grid(column=2,row=0)\n\n\n\nwindow.mainloop()\n\n\n\n###########################################################################################\n\n#Add a combobox widget\n\nfrom tkinter import *\n\nfrom tkinter.ttk import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\ncombo = Combobox(window)\n\ncombo['values'] = (1,2,3,4,5,\"Text\")\n\ncombo.current(1) # set the selected item\n\ncombo.grid(column=0, row=0)\n\nlbl = Label(window,text = combo.get())#, font = (\"Arial Bold\", 50))\n\nlbl.grid(column=1,row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add a Checkbutton widget (Tkinter checkbox)\n\nfrom tkinter import *\n\nfrom tkinter.ttk import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\nchk_state = BooleanVar()\n\nchk_state.set(True) # set check state\n\nchk = Checkbutton(window, text='Choose', var=chk_state)\n\nchk.grid(column=0, row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add radio buttons widgets\n\nfrom tkinter import *\n\nfrom tkinter.ttk import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nselected = IntVar()\n\nrad1 = Radiobutton(window, text='First', value=1, variable=selected)\n\nrad2 = Radiobutton(window, text='Second', value=2, variable=selected)\n\nrad3 = Radiobutton(window, text='Third', value=3, variable=selected)\n\n\ndef clicked():\n print(selected.get())\n\n\nbtn = Button(window, text=\"Click Me\", command=clicked)\n\nrad1.grid(column=0, row=0)\n\nrad2.grid(column=1, row=0)\n\nrad3.grid(column=2, row=0)\n\nbtn.grid(column=3, row=0)\n\nwindow.mainloop()\n\n\n\n# Create a MessageBox\n\nfrom tkinter import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks 
app\")\n\nwindow.geometry('350x200')\n\n\n#def clicked():\n #messagebox.showinfo('Message title', 'Message content')\n #messagebox.showwarning('Message title', 'Message content')\n #messagebox.showerror('Message title', 'Message content')\n\n\n # res = messagebox.askquestion('Message title', 'Message content')\n # res = messagebox.askyesno('Message title', 'Message content')\n # res = messagebox.askyesnocancel('Message title', 'Message content')\n # res = messagebox.askokcancel('Message title', 'Message content')\n # res = messagebox.askretrycancel('Message title', 'Message content')\n\ndef clicked():\n\n return messagebox.askyesnocancel('Message title', 'Message content')\n\nbtn = Button(window, text='Click here', command=clicked)\n\nbtn.grid(column=0, row=0)\n\nlbl = Label(window,text = clicked())#, font = (\"Arial Bold\", 50))\n\nlbl.grid(column=1,row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add a SpinBox (numbers widget)\n\nfrom tkinter import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\nspin = Spinbox(window, from_=0, to=100, width=5)\n\nspin.grid(column=0, row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Progress Bar\n\n\nfrom tkinter import *\n\nfrom tkinter.ttk import Progressbar\n\nfrom tkinter import ttk\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\nstyle = ttk.Style()\n\nstyle.theme_use('default')\n\nstyle.configure(\"black.Horizontal.TProgressbar\", background='black')\n\nbar = Progressbar(window, length=200, style='black.Horizontal.TProgressbar')\n\nbar['value'] = 70\n\nbar.grid(column=0, row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add a Menu bar\n\nfrom tkinter import *\n\nfrom tkinter import Menu\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nmenu = Menu(window)\n\nnew_item = Menu(menu)\n\nnew_item.add_command(label='New')\n\nnew_item.add_separator()\n\nnew_item.add_command(label='Edit')\n\nnew_item2 = Menu(menu)\n\nnew_item2.add_command(label='New2')\n\nnew_item2.add_separator()\n\nnew_item2.add_command(label='Edit2')\n\nmenu.add_cascade(label='File', menu=new_item)\nmenu.add_cascade(label='Options', menu=new_item2)\n\nwindow.config(menu=menu)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add Widgets\n\nfrom tkinter import *\n\nfrom tkinter import ttk\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\ntab_control = ttk.Notebook(window)\n\ntab1 = ttk.Frame(tab_control)\n\ntab2 = ttk.Frame(tab_control)\n\ntab_control.add(tab1, text='First')\n\ntab_control.add(tab2, text='Second')\n\nlbl1 = Label(tab1, text='label1')\n\nlbl1.grid(column=0, row=0)\n\nlbl2 = Label(tab2, text='label2')\n\nlbl2.grid(column=0, row=0)\n\ntab_control.pack(expand=1, fill='both')\n\nwindow.mainloop()","repo_name":"macicekm/PyCourses","sub_path":"Tkinter2.py","file_name":"Tkinter2.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74933734967","text":"from sprite_object import *\nfrom npc import *\nfrom map import *\nimport random\nimport math\n\nclass ObjectHandler:\n def __init__(self, game):\n # Sprite variables\n self.npc_sprite_path = 
'resources/sprites/npc/'\n        self.static_sprite_path = 'resources/sprites/static_sprites/'\n        self.anim_sprite_path = 'resources/sprites/animated_sprites/'\n\n        self.game = game\n        self.sprite_list = []\n        self.npc_list = []\n        self.alive_npc_list = []\n        self.npc_positions = {}\n        self.lastRespawned = pg.time.get_ticks()\n        self.killed = 0\n\n        self.gameMap = Map(game)\n        self.map_size = self.gameMap.get_size()\n\n        self.hpRestored = False\n        self.dmgIncreased = False\n\n        # Sprites on the Map\n        self.add_sprite(SpriteObject(game))\n        for i in range(0, self.randomNum(10, 30)):\n            newX = self.randomNum(0, self.map_size[0])\n            newY = self.randomNum(0, self.map_size[1])\n            self.add_sprite(AnimatedSprite(game, pos=(newX - 0.5, newY - 0.5)))\n\n        # Little bit more interesting spawning, but also more problematic\n        print(\"MAP Size X: \" + str(self.map_size[0]) + \" | Y: \" + str(self.map_size[1]) + \" | Empty: \" + str(self.gameMap.world_empty_space))\n\n        cloRangeNPC = 0\n        while cloRangeNPC < 10:\n            newX = self.randomNum(4, math.floor(self.map_size[0] / 4))\n            newY = self.randomNum(4, math.floor(self.map_size[1] / 6))\n            if not self.gameMap.isWall(newX, newY):\n                self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n                cloRangeNPC = cloRangeNPC + 1\n\n        midRangeNPC = 0\n        while midRangeNPC < 60:\n            newX = self.randomNum(math.floor(self.map_size[0] / 4) + 1, math.floor(self.map_size[0] / 2))\n            newY = self.randomNum(math.floor(self.map_size[1] / 6) + 1, math.floor(self.map_size[1] / 3))\n            if not self.gameMap.isWall(newX, newY):\n                self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n                midRangeNPC = midRangeNPC + 1\n\n        farRangeNPC = 0\n        while farRangeNPC < 40:\n            newX = self.randomNum(math.floor(self.map_size[0] / 2) + 1, self.map_size[0])\n            newY = self.randomNum(math.floor(self.map_size[1] / 3) + 1, self.map_size[1])\n            if not self.gameMap.isWall(newX, newY):\n                self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n                farRangeNPC = farRangeNPC + 1\n\n    def killReward(self):\n        reward = self.killed\n        if reward > 15 and not self.hpRestored:\n            self.game.player.set_health(200)\n            self.game.sound.hpHealed.play()\n            self.hpRestored = True\n\n        if reward > 20 and not self.dmgIncreased:\n            self.game.weapon.set_damage(100)\n            self.game.sound.dmgIncrease.play()\n            self.dmgIncreased = True\n\n    def update(self):\n        self.killReward()\n        self.npc_positions = {npc.map_pos for npc in self.npc_list if npc.alive}\n        [sprite.update() for sprite in self.sprite_list]\n        for npc in self.npc_list:\n            npc.update()\n\n        for npc in self.alive_npc_list:\n            if not npc.isAlive():\n                self.alive_npc_list.pop(self.alive_npc_list.index(npc))\n                self.killed = self.killed + 1\n\n        time_now = pg.time.get_ticks()\n        if time_now - self.lastRespawned > 15000:\n            while True:\n                newX = self.randomNum(0, self.map_size[0])\n                newY = self.randomNum(0, self.map_size[1])\n                if not self.gameMap.isWall(newX, newY):\n                    self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n                    break\n            self.lastRespawned = time_now\n\n    def add_npc(self, npc):\n        self.npc_list.append(npc)\n        self.alive_npc_list.append(npc)\n\n    def add_sprite(self, sprite):\n        self.sprite_list.append(sprite)\n\n    def randomNum(self, minNum, maxNum):\n        return random.randint(minNum, maxNum)\n","repo_name":"DomasBar/FinalProjectGame","sub_path":"object_handler.py","file_name":"object_handler.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"74148229687","text":"# -*- coding: UTF-8 -*-\nclass LRUCache:\n\n    def __init__(self, capacity):\n        \"\"\"\n        146. LRU cache mechanism (review)\n        :type capacity: int\n        \"\"\"\n        self.hash = {}\n        self.cur = capacity\n        self.list = []\n\n    def get(self, key):\n        \"\"\"\n        :type key: int\n        :rtype: int\n        \"\"\"\n        if key in self.list:\n            self.list.remove(key)\n            self.list.insert(0, key)\n            return self.hash.get(key)\n        else:\n            return -1\n\n    def put(self, key, value):\n        \"\"\"\n        :type key: int\n        :type value: int\n        :rtype: void\n        \"\"\"\n        if key in self.hash:  # membership test (not truthiness), so existing keys with falsy values are updated correctly\n            self.list.remove(key)\n            self.list.insert(0, key)\n        elif self.cur == 0:  # once the remaining capacity reaches 0, start evicting the least recently used key\n            self.hash.pop(self.list.pop())\n            self.list.insert(0, key)\n        else:\n            self.cur -= 1  # capacity not exhausted yet, just decrement it\n            self.list.insert(0, key)\n\n        self.hash[key] = value\n\n\nif __name__ == '__main__':\n    cache = LRUCache(1)\n    cache.put(2, 1)\n    cache.put(1, 1)\n    cache.put(2, 3)\n    cache.put(4, 1)\n    print(cache.get(1))\n","repo_name":"fuyao-w/PYAlgorithmsAndDataStructures","sub_path":"dataStructure/LRUCache.py","file_name":"LRUCache.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74947974327","text":"\"\"\"\nOpenCV resize\ncomparing various methods of resize interpolation\n\"\"\"\nfrom __future__ import print_function\nimport argparse\n\nimport os\nimport cv2\nimport numpy as np\n\n\nif __name__ == '__main__':\n    # Parse\n    parser = argparse.ArgumentParser(description='OpenCV resize comparison')\n    parser.add_argument('--input', '-i', default=None, help='input file path')\n    parser.add_argument('--output', '-o', default=None, help='output folder directory')\n    args = parser.parse_args()\n\n    filepath = os.path.dirname(os.path.realpath(__file__))\n\n    if args.input is not None:\n        photo_file_path = args.input\n    else:\n        photo_file_path = os.path.join(filepath, '../../assets/compare/0/photo0_xinput.jpg')\n\n    if args.output is not None:\n        output_dir = args.output\n    else:\n        output_dir = os.path.join(filepath, '../../assets/compare/0')\n\n    input_img = cv2.imread(photo_file_path, cv2.IMREAD_COLOR)\n    input_image_height = input_img.shape[0]\n    input_image_width = input_img.shape[1]\n    output_image_height = 2 * input_image_height\n    output_image_width = 2 * input_image_width\n\n    scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_NEAREST)\n    cv2.imwrite(os.path.join(output_dir, 'nearest.jpg'), scaled_input_img)\n    scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_LINEAR)\n    cv2.imwrite(os.path.join(output_dir, 'linear.jpg'), scaled_input_img)\n    scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_AREA)\n    cv2.imwrite(os.path.join(output_dir, 'area.jpg'), scaled_input_img)\n    scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_CUBIC)\n    cv2.imwrite(os.path.join(output_dir, 'cubic.jpg'), scaled_input_img)\n    scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_LANCZOS4)\n    cv2.imwrite(os.path.join(output_dir, 'lanczos.jpg'), scaled_input_img)\n\n\n","repo_name":"corochann/SeRanet","sub_path":"src/tools/opencv_resize.py","file_name":"opencv_resize.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"77"} +{"seq_id":"23567931321","text":"import pandas as pd\nimport numpy as np\n\n# import 
requests\n# from urllib3 import request\n# import json\nimport sqlalchemy as sa\n\n# import psycopg2\n\nfrom io import StringIO\nimport csv\n\nfrom win10toast import ToastNotifier\n\nimport road_index_calculations as calc\n\ntoast = ToastNotifier()\ntoast.show_toast(\n \"SCRIPT RUNNING\",\n \"Inserting records from FulcrumApp\",\n duration=10,\n)\n\nTABLE = \"road_visual_assessment_view\"\nCREATED_TABLE = \"road_visual_assessment_created\"\n\nSCHEMA = \"assessment\"\n\nCSV = r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.csv\"\nCSV_ANCILLARY = (\n r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.csv?child=ancillary_assets\"\n)\n\nJSON = r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.json\"\n\nGEOJSON = r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.geojson\"\nGEOJSON_ANCILLARY = (\n r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.geojson?child=ancillary_assets\"\n)\n\nDB_NAME = \"wc_asset_management\"\nDB_USER = \"postgres\"\nDB_PASS = \"post@dmin100!\"\nDB_HOST = \"10.73.1.2\"\nDB_PORT = \"5436\"\n\nENGINE_URL = sa.engine.URL.create(\n \"postgresql\",\n username=DB_USER,\n password=DB_PASS,\n host=DB_HOST,\n port=DB_PORT,\n database=DB_NAME,\n)\n\nENGINE = sa.create_engine(\n ENGINE_URL\n)\n\nPROJECT = 'ODM RRAMS 2021 - 2023'\n\ndef get_int_columns():\n cols_qry = \"\"\"select column_name\n from information_schema.columns\n where table_schema = 'assessment' and table_name = 'road_visual_assessment' \n and data_type in ('integer', 'smallint', 'bigint');\"\"\"\n cols = pd.read_sql_query(cols_qry, ENGINE)\n cols = list(cols['column_name'])\n return cols\n\ndef psql_insert_copy(table, conn, keys, data_iter):\n \"\"\"\n Execute SQL statement inserting data\n\n Parameters\n ----------\n table : pandas.io.sql.SQLTable\n conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection\n keys : list of str\n Column names\n data_iter : Iterable that iterates the values to be inserted\n \"\"\"\n # gets a DBAPI connection that can provide a cursor\n dbapi_conn = conn.connection\n with dbapi_conn.cursor() as cur:\n s_buf = StringIO()\n writer = csv.writer(s_buf)\n writer.writerows(data_iter)\n s_buf.seek(0)\n\n columns = \", \".join('\"{}\"'.format(k) for k in keys)\n if table.schema:\n table_name = \"{}.{}\".format(table.schema, table.name)\n else:\n table_name = table.name\n\n sql = \"COPY {} ({}) FROM STDIN WITH CSV\".format(table_name, columns)\n cur.copy_expert(sql=sql, file=s_buf)\n\ndef main():\n try:\n\n df = pd.read_csv(CSV, low_memory=False)\n ancillary_data = pd.read_csv(CSV_ANCILLARY, low_memory=False)\n\n try:\n df.rename(columns = {'project':'project_name', 'kerbs': 'kerbs_degree'}, inplace = True)\n except:\n pass\n df['kerbs_degree'].loc[df['kerbs_degree']=='N'] = np.nan\n\n cols = pd.read_sql_query(\"select * from assessment.road_visual_assessment limit 1;\", ENGINE)\n cols = list(cols.columns)\n\n int_cols = get_int_columns()\n int_cols = [i for i in int_cols if i in df.columns]\n\n inspected = df[df[\"status\"] == \"inspected\"]\n inspected[\"segment_id\"] = inspected[\"asset_id\"]\n \n # inspected = calc.main(inspected)\n\n created = df[df[\"status\"] == \"created\"]\n created[\"segment_id\"] = created[\"fulcrum_id\"]\n\n # created = calc.main(created)\n\n inspected.drop('asset_id', axis=1, inplace=True)\n\n inspected[int_cols] = inspected[int_cols].astype(int).fillna(0)\n created[int_cols] = created[int_cols].astype(int).fillna(0)\n\n inspected = inspected[inspected.columns.intersection(cols)]\n created = 
created[created.columns.intersection(cols)]\n\n inspected.to_sql(\n TABLE,\n ENGINE,\n schema=SCHEMA,\n if_exists=\"append\",\n index=False,\n method=psql_insert_copy,\n )\n created.to_sql(\n TABLE,\n ENGINE,\n schema=SCHEMA,\n if_exists=\"append\",\n index=False,\n method=psql_insert_copy,\n )\n\n # try:\n # conn = psycopg2.connect(\n # dbname=\"asset_management_master\",\n # user=\"postgres\",\n # password=\"$admin\",\n # host=\"localhost\",\n # port=5432,\n # )\n # conn.set_session(autocommit=True)\n # cur = conn.cursor()\n # cur.callproc(\n # \"assessment.rva_indices\",\n # )\n # except (Exception, psycopg2.DatabaseError) as e:\n # print(e)\n # finally:\n # if conn is not None:\n # conn.close()\n\n # try:\n # conn = psycopg2.connect(\n # dbname=\"asset_management_master\",\n # user=\"postgres\",\n # password=\"$admin\",\n # host=\"localhost\",\n # port=5432,\n # )\n # conn.set_session(autocommit=True)\n # cur = conn.cursor()\n # cur.callproc(\n # \"assessment.rva_indices_2\",\n # )\n # except (Exception, psycopg2.DatabaseError) as e:\n # print(e)\n # finally:\n # if conn is not None:\n # conn.close()\n\n toast.show_toast(\n \"SCRIPT RAN SUCCESSFULLY\",\n \"Inserting records from FulcrumApp\",\n duration=10,\n )\n except:\n toast.show_toast(\n \"SOMETHING WENT WRONG - PLEASE CHECK IMPORT FUNCTION\",\n \"Inserting records from FulcrumApp\",\n duration=10,\n )\n\n\nif __name__ == '__main__':\n main()","repo_name":"brandtosaurus/inspections","sub_path":"import_inspection_from_fulcrum.py","file_name":"import_inspection_from_fulcrum.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31142554303","text":"from django.apps import apps\nfrom django.shortcuts import render, redirect\nfrom django.views import View\n\n\nclass BaseGenericView(View):\n template_name = None\n object_name = None\n model_class = None\n context = None\n values = None\n\n def get_for_single(self, model_class, id_object):\n return model_class.objects.get(id=id_object)\n\n def get(self, request, *args, **kwargs):\n method = kwargs.get('method', 'get')\n self.object_name = kwargs['object_name']\n self.model_class = apps.get_model(app_label='app', model_name=self.object_name)\n self.context = self.model_class.get_context() if hasattr(self.model_class, 'get_context') else {}\n if kwargs['pk'] is not None:\n self.context['object'] = self.get_for_single(self.model_class, kwargs['pk'])\n if method == 'list':\n self.context['object_list'] = self.get_for_list()\n self.get_template_name(method)\n return render(request, self.template_name, self.context)\n\n def get_for_list(self):\n return self.model_class.objects.all()\n\n def get_template_name(self, method):\n if method == 'delete':\n post_fix = '_delete.html'\n elif method == 'list':\n post_fix = '_list.html'\n else:\n post_fix = '_form.html'\n\n self.template_name = 'app/' + self.object_name.lower() + post_fix\n\n def post(self, request, *args, **kwargs):\n object_name = kwargs['object_name']\n self.model_class = apps.get_model(app_label='app', model_name=object_name)\n if kwargs['pk'] is None:\n self.set_values(request)\n self.create(request, *args, **kwargs)\n elif request.POST.get('delete'):\n self.delete(*args, **kwargs)\n else:\n self.set_values(request)\n self.edit(request, *args, **kwargs)\n return redirect('detail', method='list', object_name=object_name)\n\n def create(self, request, *args, **kwargs):\n model_object = self.model_class(**self.values)\n 
model_object.save()\n\n def edit(self, request, *args, **kwargs):\n model_object = self.model_class.objects.filter(id=kwargs['pk'])\n model_object.update(**self.values)\n \n def delete(self, *args, **kwargs):\n model_object = self.model_class.objects.get(id=kwargs['pk'])\n model_object.delete()\n\n def set_values(self, request):\n post_items = dict(request.POST)\n fields = [field.name for field in self.model_class._meta.get_fields()]\n self.values = {field: request.POST[field] for field in fields\n if field != 'id' if field in post_items.keys()}\n","repo_name":"Karolucha/pmb","sub_path":"adminek/views/generic_views.py","file_name":"generic_views.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6484559014","text":"# -*- coding: utf-8 -*-\nfrom simple_perms import PermissionLogic, register\n\nfrom helpers.mixins import BasicPermissionLogicMixin\n\n\nclass TagPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):\n def view(self, user, tag, *args):\n \"\"\"\n Permissions for viewing/editing Tag\n \"\"\"\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return tag.owning_group == user.group\n\n return self.admin_permission(user, tag, *args)\n\n def create(self, user, tag, *args):\n \"\"\"\n Permissions for creating Tag\n \"\"\"\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager or user.is_advisor:\n if tag.owning_group.pk == user.group.pk:\n return True\n\n return self.admin_permission(user, tag, *args)\n\n change = view\n delete = view\n\n\nregister(\"tag\", TagPermissionLogic)\nregister(\"fac/tag\", TagPermissionLogic)\n","repo_name":"alexandrenorman/mixeur","sub_path":"fac/perms/tag_perm.py","file_name":"tag_perm.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36704320289","text":"from typing import List\n\n\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n left, right = 0, len(s) - 1\n while left < right:\n while left < right and not s[left].isalnum():\n left += 1\n while left < right and not s[right].isalnum():\n right -= 1\n if s[left].upper() != s[right].upper():\n return False\n left += 1\n right -= 1\n return True\n\n\nif __name__ == \"__main__\":\n S = Solution()\n print(S.isPalindrome(\"A man, a plan, a canal: Panama\"))\n","repo_name":"JasmineRain/Algorithm","sub_path":"Python/Double Pointers/125_Easy_验证回文串.py","file_name":"125_Easy_验证回文串.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15224525144","text":"\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom uuid import uuid4\nimport tinys3\nimport os\n\nAWS_ACCESS_KEY_ID = 'AKIAJOAPGZD2SYWXWBWQ'\nAWS_SECRET_ACCESS_KEY = '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'\n\n\ndef key_name_for_path(p):\n s = str(uuid4())\n basename = os.path.basename(p)\n if '.' in basename:\n s += '.' 
+ basename.split('.')[-1]\n return s\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('image_path', type=str)\n\n def handle(self, *args, **options):\n image_path = options['image_path']\n key_name = key_name_for_path(image_path)\n print('Uploading \"%s\" to \"%s\" on S3...' % (image_path, key_name))\n\n conn = tinys3.Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, default_bucket='siphon-assets')\n with open(image_path, 'rb') as fp:\n result = conn.upload(key_name, fp, public=True)\n\n print('Done.')\n print('\\n--> %s' % result.url)\n","repo_name":"siphoncode/siphon-web","sub_path":"siphon/web/apps/core/management/commands/image_to_s3.py","file_name":"image_to_s3.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40749440139","text":"from typing import Optional\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def reorderList(self, head: Optional[ListNode]) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n mid = self.findMiddle(head)\n left = head\n right = mid.next\n mid.next = None\n right = self.reverseList(right)\n return self.mergeTwoLists(left, right)\n \n def findMiddle(self, head):\n slow = fast = head\n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n \n def reverseList(self, head):\n cur = None\n while head:\n new_head = head.next\n head.next = cur\n cur = head\n head = new_head\n return cur\n \n def mergeTwoLists(self, left, right):\n head = ListNode()\n cur = head\n while left and right:\n new_left, new_right = left.next, right.next\n cur.next = left\n cur.next.next = right\n cur = right\n left, right = new_left, new_right\n if left:\n cur.next = left\n if right:\n cur.next = right\n return head.next","repo_name":"zt5rice/LC-archive","sub_path":"0143. 
Reorder List/143.Reorder-List.py","file_name":"143.Reorder-List.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74581835127","text":"'''\nLeia um caractere maiúsculo, que indica uma operação que deve ser realizada e uma matriz M[12][12].\nEm seguida, calcule e mostre a soma ou a média considerando somente aqueles elementos que estão na \nárea superior da matriz, conforme ilustrado abaixo (área verde).\n'''\n\ndef criar_matriz(lin,col):\n mat = []\n for i in range(lin):\n mat.append([0] * col)\n return mat\n\ndef ler_matriz(mat,lin,col):\n for i in range(lin):\n for j in range(col):\n mat[i][j] = float(input())\n\nsoma = 0\nO = input(\"\")\nM = criar_matriz(12,12)\nler_matriz(M, 12, 12)\n\nfor i in range (5):\n for j in range (11,0,-1):\n if j > i and j-i >= 1 and i + j < 11:\n soma = soma + M[i][j]\n\nif O == \"s\" or O == \"S\":\n print(round(soma, 1))\n\nelse:\n print(round(soma/30,1))","repo_name":"weep-dev/Python","sub_path":"Exercise2/AreaSuperior.py","file_name":"AreaSuperior.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"32273118328","text":"# -*- coding: utf-8 -*-\n\n\"\"\" OpenStreetMap related tools\n\nAmong available tools:\n- Use Overpass API to query OSM database and convert JSON\nresponse to geopandas dataframe (thanks to https://github.com/yannforget/OSMxtract for inspiration !)\n\"\"\"\nimport geojson\nimport geopandas as gpd\nfrom osmnx import geocode_to_gdf\nfrom osmnx.downloader import overpass_request\n\nfrom gistools.exceptions import QlQueryError\nfrom gistools.geometry import merge\nfrom osmnx.settings import default_crs\nfrom shapely.geometry import LineString, Point, MultiLineString, Polygon, MultiPolygon, MultiPoint\n\nfrom gistools.utils.check.value import check_string\n\nGEOMETRY_CLASS = {'linestring': (LineString, MultiLineString), 'polygon': (Polygon, MultiPolygon),\n 'point': (Point, MultiPoint)}\n\n\ndef _to_point_features(json):\n \"\"\" Read json response and extract point geometries\n\n :param json: JSON response from overpass API\n :return: GeoJSON FeatureCollection\n \"\"\"\n features = []\n elements = [e for e in json['elements'] if e['type'] == 'node']\n for elem in elements:\n coords = [elem['lon'], elem['lat']]\n features.append(geojson.Feature(id=elem['id'], geometry=Point(coords), properties=_feature_tags(elem)))\n\n return geojson.FeatureCollection(features)\n\n\ndef _to_features(json, geometry_type):\n \"\"\" Read json response and extract (multi)linestring/polygon geometries\n\n :param json: json response\n :param geometry_type: {'linestring', 'polygon'}\n :return:\n \"\"\"\n features = []\n\n if geometry_type == 'linestring':\n elements = [e for e in json['elements'] if e['type'] == 'way' or e['type'] == 'relation']\n else:\n elements = [e for e in json['elements'] if e['type'] == 'way' or (e['type'] == 'relation' and e['tags']['type']\n in ('multipolygon', 'boundary'))]\n\n for elem in elements:\n\n if elem['type'] == 'way':\n coords = [[node['lon'], node['lat']] for node in elem['geometry']]\n try:\n geom = GEOMETRY_CLASS[geometry_type][0](coords) # LineString, Polygon\n except ValueError:\n pass\n else:\n features.append(geojson.Feature(id=elem['id'], geometry=geom, properties=_feature_tags(elem)))\n\n elif elem['type'] == 'relation':\n collection = []\n for member in elem['members']:\n if member['type'] == 'way':\n 
member_coords = [(node['lon'], node['lat']) for node in member['geometry']]\n collection.append(LineString(member_coords))\n geom_collection = merge(collection)\n\n if geom_collection:\n try:\n geom = GEOMETRY_CLASS[geometry_type][1]([GEOMETRY_CLASS[geometry_type][0](line) for line in\n geom_collection]) # MultiLineString, MultiPolygon\n except ValueError:\n pass\n else:\n features.append(geojson.Feature(id=elem['id'], geometry=geom, properties=_feature_tags(elem)))\n\n return geojson.FeatureCollection(features)\n\n\ndef _feature_tags(json_element):\n \"\"\" Update feature tags to set OSM ID and type\n\n :param json_element: 'elements' feature in JSON dict response\n :return:\n \"\"\"\n if 'id' not in json_element['tags'].keys():\n tags = dict(osm_id=json_element['id'], **json_element['tags'])\n else:\n tags = json_element['tags']\n\n # Add osm type to attributes\n tags.update(osm_type=json_element['type'])\n\n return tags\n\n\ndef download_osm_features(place, osm_type, tag, values=None, by_poly=True, timeout=180):\n \"\"\" Download OSM features within given place\n\n :param place: single place name query (e.g: \"London\", \"Bonn\", etc.)\n :param osm_type: OSM geometry type str ('node', 'way', 'relation')\n :param tag: OSM tag to query\n :param values: str/list of possible values for the provided OSM tag\n :param by_poly: if True, retrieve features within polygon's list of coordinates, otherwise use bounds\n :param timeout:\n :return:\n \"\"\"\n gdf_geometry = geocode_to_gdf(place)\n\n try:\n geometry = gdf_geometry.geometry[0]\n except AttributeError: # Empty GeoDataFrame\n return None\n\n responses = []\n\n if by_poly:\n polygon_coord_strs = get_polygons_coordinates(geometry)\n for poly_coord_str in polygon_coord_strs:\n query = ql_query(osm_type, tag, values, polygon_coord=poly_coord_str, timeout=timeout)\n responses.append(overpass_request(data={'data': query}))\n else:\n query = ql_query(osm_type, tag, values, bounds=geometry.bounds, timeout=timeout)\n responses.append(overpass_request(data={'data': query}))\n\n return responses\n\n\ndef get_polygons_coordinates(geometry):\n \"\"\"\n Extract exterior coordinates from polygon(s) to pass to OSM in a query by\n polygon. 
Ignore the interior (\"holes\") coordinates.\n\n Parameters\n ----------\n geometry : shapely Polygon or MultiPolygon\n the geometry to extract exterior coordinates from\n\n Returns\n -------\n polygon_coord_strs : list\n\n Note\n ----\n Function from osmnx package version 0.10 (https://github.com/gboeing/osmnx)\n \"\"\"\n\n # extract the exterior coordinates of the geometry to pass to the API later\n polygons_coords = []\n if isinstance(geometry, Polygon):\n x, y = geometry.exterior.xy\n polygons_coords.append(list(zip(x, y)))\n elif isinstance(geometry, MultiPolygon):\n for polygon in geometry:\n x, y = polygon.exterior.xy\n polygons_coords.append(list(zip(x, y)))\n else:\n raise TypeError('Geometry must be a shapely Polygon or MultiPolygon')\n\n # convert the exterior coordinates of the polygon(s) to the string format\n # the API expects\n polygon_coord_strs = []\n for coords in polygons_coords:\n s = ''\n separator = ' '\n for coord in list(coords):\n # round floating point lats and longs to 6 decimal places (ie, ~100 mm),\n # so we can hash and cache strings consistently\n s = '{}{}{:.6f}{}{:.6f}'.format(s, separator, coord[1], separator, coord[0])\n polygon_coord_strs.append(s.strip(separator))\n\n return polygon_coord_strs\n\n\ndef json_to_geodataframe(response, geometry_type):\n \"\"\" Convert JSON responses to\n\n :param response: json response\n :param geometry_type: type of geometry to extract ('point', 'linestring', 'polygon', 'multipolygon')\n :return:\n \"\"\"\n geometry_type = check_string(geometry_type, ('point', 'linestring', 'polygon'))\n\n if geometry_type == 'point':\n return gpd.GeoDataFrame.from_features(_to_point_features(response), crs=default_crs)\n else:\n return gpd.GeoDataFrame.from_features(_to_features(response, geometry_type), crs=default_crs)\n\n\ndef ql_query(osm_type, tag, values=None, bounds=None, polygon_coord=None, timeout=180):\n \"\"\" QL query (thanks to https://github.com/yannforget/OSMxtract for inspiration !)\n\n :param osm_type: OSM geometry type str {'node', 'way', 'relation', 'nwr'}\n :param tag: OSM tag to query\n :param values: str/list of possible values for the provided OSM tag\n :param bounds: geometry bounds\n :param polygon_coord: location's polygon list of coordinates\n :param timeout:\n :return:\n \"\"\"\n osm_type = check_string(osm_type, ('node', 'way', 'relation', 'nwr'))\n\n if isinstance(values, str):\n values = [values]\n\n if bounds and not polygon_coord:\n west, south, east, north = bounds\n boundary = f'({south:.6f},{west:.6f},{north:.6f},{east:.6f})'\n elif polygon_coord and not bounds:\n boundary = f'(poly:\"{polygon_coord}\")'\n else:\n raise QlQueryError(\"Must define either geometry bounds or polygon coordinates\")\n\n if values:\n if len(values) > 1:\n tags = f'[\"{ tag }\"~\"{ \"|\".join(values) }\"]'\n else:\n tags = f'[\"{ tag }\"=\"{ values[0] }\"]'\n else:\n tags = f'[\"{tag}\"]'\n\n return f'[out:json][timeout:{timeout}];{osm_type}{tags}{boundary};out geom;'\n","repo_name":"benjaminpillot/gis-tools","sub_path":"gistools/osm.py","file_name":"osm.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"25975327922","text":"from asyncio import TimeoutError\nfrom TicTacToe import TicTacToe\n\nROUND_TIME_LIMIT = 40.0\nTOO_SLOW_MESSAGE = \"too slow darling -_-\"\nGAME_OVER_MESSAGE = \"GAME OVER\"\n\n\nasync def start_game(client, channel, user):\n game = TicTacToe()\n # user will play the game through a message and its 
discord-reacts.\n    empty_board = str(game)\n    game_message = await channel.send(empty_board)\n    for square in game.board:\n        await game_message.add_reaction(square)\n    # start game loop\n    await get_user_reaction(client, channel, user, game, game_message)\n\n\nasync def get_user_reaction(client, channel, user, game, game_message):\n    # user must react within time limit.\n    try:\n        user_reaction, user = await client.wait_for(\n            'reaction_add',\n            timeout=ROUND_TIME_LIMIT,\n            check=lambda r, u: game.is_position_empty(str(r.emoji)) and u == user)\n    # user took too long\n    except TimeoutError:\n        await channel.send(TOO_SLOW_MESSAGE)\n    # user chose a react in time\n    else:\n        await update_game_message(client, channel, user, user_reaction, game, game_message)\n\n\nasync def update_game_message(client, channel, user, user_reaction, game, game_message):\n    \"\"\"Update the original game-message with the new board.\"\"\"\n    user_move = str(user_reaction.emoji)\n    game.update_board(user_move=user_move)\n    await game_message.edit(content=str(game))\n    if not game.is_over():\n        await get_user_reaction(client, channel, user, game, game_message)\n    else:\n        await channel.send(GAME_OVER_MESSAGE)\n","repo_name":"benji1123/002-Discord-Bot","sub_path":"TicTacToeHandler.py","file_name":"TicTacToeHandler.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"37036067303","text":"\"\"\"Python Script Template.\"\"\"\nimport seaborn as sns\nfrom exps.utilities import get_exact_reps, get_exact_q_reps, get_sample_qreps\n\npalette = sns.color_palette(n_colors=10)\n\n\ndef get_eta_agents(env, eta, alpha, *args, **kwargs):\n    \"\"\"Return agents that need the model.\"\"\"\n    agents = {\n        \"ExactQREPS-0.001\": get_exact_q_reps(\n            env, eta=0.001 * eta, alpha=1.0, *args, **kwargs\n        ),\n        \"ExactQREPS-0.01\": get_exact_q_reps(\n            env, eta=0.01 * eta, alpha=1.0, *args, **kwargs\n        ),\n        \"ExactQREPS-0.1\": get_exact_q_reps(\n            env, eta=0.1 * eta, alpha=1.0, *args, **kwargs\n        ),\n        \"ExactQREPS-1\": get_exact_q_reps(env, eta=eta, alpha=1.0, *args, **kwargs),\n        \"ExactQREPS-10\": get_exact_q_reps(\n            env, eta=10 * eta, alpha=1.0, *args, **kwargs\n        ),\n    }\n    return agents\n\n\ndef get_linestyle(name: str):\n    \"\"\"Get agent linestyle.\"\"\"\n    if \"QREPS\" in name:\n        return \"solid\"\n    else:\n        return \"dashed\"\n\n\ndef get_color(name: str):\n    \"\"\"Get plot color.\"\"\"\n    if \"-100\" in name:\n        return palette[1]\n    elif \"-10\" in name:\n        return palette[2]\n    elif \"-1\" in name:\n        return palette[0]\n    elif \"-0.1\" in name:\n        return palette[3]\n    elif \"-0.01\" in name:\n        return palette[4]\n    elif \"-0.001\" in name:\n        return palette[8]\n","repo_name":"sebascuri/qreps","sub_path":"exps/effect_of_eta_on_q/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"3441337869","text":"import os.path\n\nfrom aws_cdk.aws_s3_assets import Asset\nfrom constructs import Construct\n\nfrom aws_cdk import (\n    # Duration,\n    CfnOutput,\n    Stack,\n    aws_kms as kms,\n)\n\ndirname = os.path.dirname(__file__)\n\nclass BaseKmsStack(Stack):\n\n    @property\n    def kms(self):\n        return self._kms\n\n    # @property\n    # def alias(self):\n    #     return self._alias\n\n    def __init__(self, scope: 
Construct, construct_id: str, prefix_name: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n self._kms = kms.Key(\n self, f\"{prefix_name.capitalize()}KmsKey\",\n description=\"base kms key\",\n alias=\"base_kms_key\"\n )\n\n # self._alias = kms.Alias(\n # self, \"Alias\",\n # alias_name=\"base_kms_key\",\n # target_key=self._kms\n # )\n\n CfnOutput(self, f\"{prefix_name.capitalize()}KmsName\", value=self._kms.key_arn)\n # CfnOutput(self, \"BaseKmsArn\", value=self._alias.key_arn)\n","repo_name":"smarkin-repository/cdk_experiments","sub_path":"base_account_setup/base_account_setup/base_kms_stack.py","file_name":"base_kms_stack.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"34446651714","text":"\"\"\" This module contains the error-related constants and classes. \"\"\"\n\nfrom collections import defaultdict, namedtuple, MutableMapping\nfrom copy import copy\nfrom .utils import compare_paths_lt, quote_string\n\n\n\"\"\"\nError definition constants\n\nEach distinguishable error is defined as a two-value-tuple that holds\na *unique* error id as integer and the rule as string that can cause it.\nThe attributes are accessible as properties ``id`` and ``rule``.\nThe names do not contain a common prefix as they are supposed to be referenced\nwithin the module namespace, e.g. errors.CUSTOM\n\"\"\"\n\nErrorDefinition = namedtuple('cerberus_error', 'code, rule')\n\n# custom\nCUSTOM = ErrorDefinition(0x00, None)\n\n# existence\nDOCUMENT_MISSING = ErrorDefinition(0x01, None) # issues/141\nDOCUMENT_MISSING = \"document is missing\"\nREQUIRED_FIELD = ErrorDefinition(0x02, 'required')\nUNKNOWN_FIELD = ErrorDefinition(0x03, None)\nDEPENDENCIES_FIELD = ErrorDefinition(0x04, 'dependencies')\nDEPENDENCIES_FIELD_VALUE = ErrorDefinition(0x05, 'dependencies')\nEXCLUDES_FIELD = ErrorDefinition(0x06, 'excludes')\n\n# shape\nDOCUMENT_FORMAT = ErrorDefinition(0x21, None) # issues/141\nDOCUMENT_FORMAT = \"'{0}' is not a document, must be a dict\"\nEMPTY_NOT_ALLOWED = ErrorDefinition(0x22, 'empty')\nNOT_NULLABLE = ErrorDefinition(0x23, 'nullable')\nBAD_TYPE = ErrorDefinition(0x24, 'type')\nBAD_TYPE_FOR_SCHEMA = ErrorDefinition(0x25, 'schema')\nITEMS_LENGTH = ErrorDefinition(0x26, 'items')\nMIN_LENGTH = ErrorDefinition(0x27, 'minlength')\nMAX_LENGTH = ErrorDefinition(0x28, 'maxlength')\n\n\n# color\nREGEX_MISMATCH = ErrorDefinition(0x41, 'regex')\nMIN_VALUE = ErrorDefinition(0x42, 'min')\nMAX_VALUE = ErrorDefinition(0x43, 'max')\nUNALLOWED_VALUE = ErrorDefinition(0x44, 'allowed')\nUNALLOWED_VALUES = ErrorDefinition(0x45, 'allowed')\nFORBIDDEN_VALUE = ErrorDefinition(0x46, 'forbidden')\nFORBIDDEN_VALUES = ErrorDefinition(0x47, 'forbidden')\n\n# other\nNORMALIZATION = ErrorDefinition(0x60, None)\nCOERCION_FAILED = ErrorDefinition(0x61, 'coerce')\nRENAMING_FAILED = ErrorDefinition(0x62, 'rename_handler')\nREADONLY_FIELD = ErrorDefinition(0x63, 'readonly')\nSETTING_DEFAULT_FAILED = ErrorDefinition(0x64, 'default_setter')\n\n# groups\nERROR_GROUP = ErrorDefinition(0x80, None)\nMAPPING_SCHEMA = ErrorDefinition(0x81, 'schema')\nSEQUENCE_SCHEMA = ErrorDefinition(0x82, 'schema')\nKEYSCHEMA = ErrorDefinition(0x83, 'keyschema')\nVALUESCHEMA = ErrorDefinition(0x84, 'valueschema')\nBAD_ITEMS = ErrorDefinition(0x8f, 'items')\n\nLOGICAL = ErrorDefinition(0x90, None)\nNONEOF = ErrorDefinition(0x91, 'noneof')\nONEOF = ErrorDefinition(0x92, 'oneof')\nANYOF = ErrorDefinition(0x93, 'anyof')\nALLOF = 
ErrorDefinition(0x94, 'allof')\n\n\n\"\"\" SchemaError messages \"\"\"\n\nSCHEMA_ERROR_DEFINITION_TYPE = \\\n \"schema definition for field '{0}' must be a dict\"\nSCHEMA_ERROR_MISSING = \"validation schema missing\"\n\n\n\"\"\" Error representations \"\"\"\n\n\nclass ValidationError:\n \"\"\" A simple class to store and query basic error information. \"\"\"\n def __init__(self, document_path, schema_path, code, rule, constraint,\n value, info):\n self.document_path = document_path\n \"\"\" The path to the field within the document that caused the error.\n Type: :class:`tuple` \"\"\"\n self.schema_path = schema_path\n \"\"\" The path to the rule within the schema that caused the error.\n Type: :class:`tuple` \"\"\"\n self.code = code\n \"\"\" The error's identifier code. Type: :class:`int` \"\"\"\n self.rule = rule\n \"\"\" The rule that failed. Type: `string` \"\"\"\n self.constraint = constraint\n \"\"\" The constraint that failed. \"\"\"\n self.value = value\n \"\"\" The value that failed. \"\"\"\n self.info = info\n \"\"\" May hold additional information about the error.\n Type: :class:`tuple` \"\"\"\n\n def __eq__(self, other):\n \"\"\" Assumes the errors relate to the same document and schema. \"\"\"\n return hash(self) == hash(other)\n\n def __hash__(self):\n \"\"\" Expects that all other properties are transitively determined. \"\"\"\n return hash(self.document_path) ^ hash(self.schema_path) \\\n ^ hash(self.code)\n\n def __lt__(self, other):\n if self.document_path != other.document_path:\n return compare_paths_lt(self.document_path, other.document_path)\n else:\n return compare_paths_lt(self.schema_path, other.schema_path)\n\n def __repr__(self):\n return \"{class_name} @ {memptr} ( \" \\\n \"document_path={document_path},\" \\\n \"schema_path={schema_path},\" \\\n \"code={code},\" \\\n \"constraint={constraint},\" \\\n \"value={value},\" \\\n \"info={info} )\"\\\n .format(class_name=self.__class__.__name__, memptr=hex(id(self)), # noqa\n document_path=self.document_path,\n schema_path=self.schema_path,\n code=hex(self.code),\n constraint=quote_string(self.constraint),\n value=quote_string(self.value),\n info=self.info)\n\n @property\n def child_errors(self):\n \"\"\"\n A list that contains the individual errors of a bulk validation error.\n \"\"\"\n return self.info[0] if self.is_group_error else None\n\n @property\n def definitions_errors(self):\n \"\"\" Dictionary with errors of an *of-rule mapped to the index of the\n definition it occurred in. Returns :obj:`None` if not applicable.\n \"\"\"\n if not self.is_logic_error:\n return None\n\n result = defaultdict(list)\n for error in self.child_errors:\n i = error.schema_path[len(self.schema_path)]\n result[i].append(error)\n return result\n\n @property\n def is_group_error(self):\n \"\"\" ``True`` for errors of bulk validations. \"\"\"\n return bool(self.code & ERROR_GROUP.code)\n\n @property\n def is_logic_error(self):\n \"\"\" ``True`` for validation errors against different schemas with\n *of-rules. \"\"\"\n return bool(self.code & LOGICAL.code - ERROR_GROUP.code)\n\n @property\n def is_normalization_error(self):\n \"\"\" ``True`` for normalization errors. \"\"\"\n return bool(self.code & NORMALIZATION.code)\n\n\nclass ErrorList(list):\n \"\"\" A list for :class:`~cerberus.errrors.ValidationError` instances that\n can be queried with the ``in`` keyword for a particular error code. 
\"\"\"\n def __contains__(self, error_definition):\n for code in (x.code for x in self):\n if code == error_definition.code:\n return True\n return False\n\n\nclass ErrorTreeNode(MutableMapping):\n __slots__ = ('descendants', 'errors', 'parent_node', 'path', 'tree_root')\n\n def __init__(self, path, parent_node):\n self.parent_node = parent_node\n self.tree_root = self.parent_node.tree_root\n self.path = path[:len(self.parent_node.path) + 1]\n self.errors = ErrorList()\n self.descendants = {}\n\n def __add__(self, error):\n self.add(error)\n return self\n\n def __delitem__(self, key):\n del self.descendants[key]\n\n def __iter__(self):\n return iter(self.errors)\n\n def __getitem__(self, item):\n return self.descendants.get(item)\n\n def __len__(self):\n return len(self.errors)\n\n def __setitem__(self, key, value):\n self.descendants[key] = value\n\n def __str__(self):\n return str(self.errors) + ',' + str(self.descendants)\n\n @property\n def depth(self):\n return len(self.path)\n\n @property\n def tree_type(self):\n return self.tree_root.tree_type\n\n def add(self, error):\n error_path = self._path_of_(error)\n\n key = error_path[self.depth]\n if key not in self.descendants:\n self[key] = ErrorTreeNode(error_path, self)\n\n if len(error_path) == self.depth + 1:\n self[key].errors.append(error)\n self[key].errors.sort()\n if error.is_group_error:\n for child_error in error.info[0]:\n self.tree_root += child_error\n else:\n self[key] += error\n\n def _path_of_(self, error):\n return getattr(error, self.tree_type + '_path')\n\n\nclass ErrorTree(ErrorTreeNode):\n \"\"\" Base class for :class:`~cerberus.errors.DocumentErrorTree` and\n :class:`~cerberus.errors.SchemaErrorTree`. \"\"\"\n def __init__(self, errors=[]):\n self.parent_node = None\n self.tree_root = self\n self.path = ()\n self.errors = []\n self.descendants = {}\n for error in errors:\n self += error\n\n def add(self, error):\n \"\"\" Add an error to the tree.\n\n :param error: :class:`~cerberus.errors.ValidationError`\n \"\"\"\n if not self._path_of_(error):\n self.errors.append(error)\n self.errors.sort()\n else:\n super(ErrorTree, self).add(error)\n\n def fetch_errors_from(self, path):\n \"\"\" Returns all errors for a particular path.\n\n :param path: :class:`tuple` of :term:`hashable` s.\n :rtype: :class:`~cerberus.errors.ErrorList`\n \"\"\"\n node = self.fetch_node_from(path)\n if node is not None:\n return node.errors\n else:\n return ErrorList()\n\n def fetch_node_from(self, path):\n \"\"\" Returns a node for a path.\n\n :param path: Tuple of :term:`hashable` s.\n :rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None`\n \"\"\"\n context = self\n for key in path:\n context = context[key]\n if context is None:\n return None\n return context\n\n\nclass DocumentErrorTree(ErrorTree):\n \"\"\" Implements a dict-like class to query errors by indexes following the\n structure of a validated document. \"\"\"\n tree_type = 'document'\n\n\nclass SchemaErrorTree(ErrorTree):\n \"\"\" Implements a dict-like class to query errors by indexes following the\n structure of the used schema. \"\"\"\n tree_type = 'schema'\n\n\nclass BaseErrorHandler:\n \"\"\" Base class for all error handlers.\n Subclasses are identified as error-handlers with an instance-test. \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\" Optionally initialize a new instance. 
\"\"\"\n pass\n\n def __call__(self, errors):\n \"\"\" Returns errors in a handler-specific format.\n\n :param errors: An object containing the errors.\n :type errors: :term:`iterable` of\n :class:`~cerberus.errors.ValidationError` instances or a\n :class:`~cerberus.Validator` instance\n \"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n \"\"\" Be a superhero and implement an iterator over errors. \"\"\"\n raise NotImplementedError\n\n def add(self, error):\n \"\"\" Add an error to the errors' container object of a handler.\n\n :param error: The error to add.\n :type error: :class:`~cerberus.errors.ValidationError`\n \"\"\"\n raise NotImplementedError\n\n def emit(self, error):\n \"\"\" Optionally emits an error in the handler's format to a stream.\n Or light a LED, or even shut down a power plant.\n\n :param error: The error to emit.\n :type error: :class:`~cerberus.errors.ValidationError`\n \"\"\"\n pass\n\n def end(self, validator):\n \"\"\" Gets called when a validation ends.\n\n :param validator: The calling validator.\n :type validator: :class:`~cerberus.Validator` \"\"\"\n pass\n\n def extend(self, errors):\n \"\"\" Adds all errors to the handler's container object.\n\n :param errors: The errors to add.\n :type errors: :term:`iterable` of\n :class:`~cerberus.errors.ValidationError` instances\n \"\"\"\n for error in errors:\n self.add(error)\n\n def start(self, validator):\n \"\"\" Gets called when a validation starts.\n\n :param validator: The calling validator.\n :type validator: :class:`~cerberus.Validator`\n \"\"\"\n pass\n\n\nclass ToyErrorHandler(BaseErrorHandler):\n def __call__(self, *args, **kwargs):\n raise RuntimeError('This is not supposed to happen.')\n\n def clear(self):\n pass\n\n\nclass BasicErrorHandler(BaseErrorHandler):\n \"\"\" Models cerberus' legacy. Returns a :class:`dict`. 
\"\"\"\n messages = {0x00: \"{0}\",\n\n 0x01: \"document is missing\",\n 0x02: \"required field\",\n 0x03: \"unknown field\",\n 0x04: \"field '{0}' is required\",\n 0x05: \"depends on these values: {constraint}\",\n 0x06: \"{0} must not be present with '{field}'\",\n\n 0x21: \"'{0}' is not a document, must be a dict\",\n 0x22: \"empty values not allowed\",\n 0x23: \"null value not allowed\",\n 0x24: \"must be of {constraint} type\",\n 0x25: \"must be of dict type\",\n 0x26: \"length of list should be {constraint}, it is {0}\",\n 0x27: \"min length is {constraint}\",\n 0x28: \"max length is {constraint}\",\n\n 0x41: \"value does not match regex '{constraint}'\",\n 0x42: \"min value is {constraint}\",\n 0x43: \"max value is {constraint}\",\n 0x44: \"unallowed value {value}\",\n 0x45: \"unallowed values {0}\",\n 0x46: \"unallowed value {value}\",\n 0x47: \"unallowed values {0}\",\n\n 0x61: \"field '{field}' cannot be coerced: {0}\",\n 0x62: \"field '{field}' cannot be renamed: {0}\",\n 0x63: \"field is read-only\",\n 0x64: \"default value for '{field}' cannot be set: {0}\",\n\n 0x81: \"mapping doesn't validate subschema: {0}\",\n 0x82: \"one or more sequence-items don't validate: {0}\",\n 0x83: \"one or more keys of a mapping don't validate: \"\n \"{0}\",\n 0x84: \"one or more values in a mapping don't validate: {0}\",\n 0x85: \"one or more sequence-items don't validate: {0}\",\n\n 0x91: \"one or more definitions validate\",\n 0x92: \"none or more than one rule validate\",\n 0x93: \"no definitions validate\",\n 0x94: \"one or more definitions don't validate\"\n }\n\n def __init__(self, tree=None):\n self.tree = {} if tree is None else tree\n\n def __call__(self, errors=None):\n if errors is not None:\n self.clear()\n self.extend(errors)\n return self.tree\n\n def add(self, error):\n if error.code not in self.messages and not error.is_group_error:\n return\n elif error.is_group_error:\n self.insert_group_error(error)\n else:\n field = error.document_path[-1] if error.document_path else None\n self.insert_error(error.document_path,\n self.format_message(field, error))\n\n def clear(self):\n self.tree = {}\n\n def format_message(self, field, error):\n return self.messages[error.code]\\\n .format(*error.info, constraint=error.constraint,\n field=field, value=error.value)\n\n def insert_error(self, path, node):\n \"\"\" Adds an error or sub-tree to :attr:tree.\n\n :param path: Path to the error.\n :type path: Tuple of strings and integers.\n :param node: An error message or a sub-tree.\n :type node: String or dictionary.\n \"\"\"\n field = path[0]\n if len(path) == 1:\n if field in self.tree:\n self.tree[field].append(node)\n else:\n self.tree[field] = [node]\n elif len(path) >= 1:\n if field not in self.tree:\n self.tree[field] = [{}]\n subtree = self.tree[field][-1]\n\n if subtree:\n new = self.__class__(tree=copy(subtree))\n else:\n new = self.__class__()\n new.insert_error(path[1:], node)\n subtree.update(new.tree)\n\n def insert_group_error(self, error):\n if error.is_logic_error:\n self.insert_logic_error(error)\n\n for error in error.child_errors:\n if error.is_group_error:\n self.insert_group_error(error)\n else:\n field = error.document_path[-1] if error.document_path else None\n self.insert_error(error.document_path,\n self.format_message(field, error))\n\n def insert_logic_error(self, error):\n path = error.document_path + (error.rule, )\n self.insert_error(path, self.format_message(None, error))\n for i in error.definitions_errors:\n for child_error in error.definitions_errors[i]:\n 
field = child_error.document_path[-1]\n path = child_error.document_path[:-1] + \\\n ('definition %s' % i, field)\n self.insert_error(path, self.format_message(field, child_error)) # noqa\n\n def start(self, validator):\n self.clear()\n\n\nclass SchemaErrorHandler(BasicErrorHandler):\n messages = BasicErrorHandler.messages.copy()\n messages[0x03] = \"unknown rule\"\n","repo_name":"dfci/matchminer-api","sub_path":"cerberus1/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":17351,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"77"} +{"seq_id":"35340950350","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_django-table-select-widget\n------------\n\nTests for `django-table-select-widget` models module.\n\"\"\"\n\nimport django\nfrom django import forms\nfrom django.test import TestCase\n\nfrom model_mommy import mommy\n\nfrom table_select_widget import TableSelectMultiple\n\nfrom .models import Choice\n\n\nclass TestTableSelectWidget(TestCase):\n def setUp(self):\n mommy.make(\n \"Choice\",\n name=\"Choice 1\",\n description=\"Choice 1 description\",\n choice_type__name=\"Foo type\",\n )\n\n maxDiff = None\n\n def test_widget(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=[\n 'name',\n 'description',\n ],\n ),\n )\n render = ChoiceForm().as_p()\n self.assertHTMLEqual(\n '

'\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n ' '\n '
NameDescription
'\n ' '\n ' Choice 1Choice 1 description
'\n ' '\n '

'.format(\"required\" if django.VERSION > (1,10,0) else \"\"),\n render,\n )\n\n def test_widget_datatables(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n enable_datatables=True,\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"$('#choice_field').DataTable({\" in render)\n\n def test_widget_datatables_options(self):\n \"\"\" Test setting additional options \"\"\"\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n enable_datatables=True,\n datatable_options={\n 'language': {'url': 'foo.js'},\n },\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue('\"language\": {\"url\": \"foo.js\"}' in render)\n\n def test_widget_bootstrap(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n bootstrap_style=True,\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"form-check-input\" in render)\n self.assertTrue(\"table table-sm table-bordered\" in render)\n\n def test_widget_shift_select(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n enable_shift_select=True,\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"$.fn.shiftClick = function () {\" in render)\n\n def test_widget_related(self):\n \"\"\" Test, that function on related field is called \"\"\"\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=[('choice_type__get_name', 'Type')],\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"Type\" in render)\n self.assertTrue(\"Type: Foo type\" in render)\n\n def test_widget_none(self):\n \"\"\" If value of variable is null, render it as blank cell \"\"\"\n mommy.make(\n \"Choice\",\n name=\"Choice 2\",\n description=None,\n )\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['description'],\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"\" in render)\n","repo_name":"willardmr/DjangoTableSelectMultipleWidget","sub_path":"tests/test_widget.py","file_name":"test_widget.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"39554429093","text":"import gc\nimport logging\nimport math\nimport sys\nfrom concurrent import futures\nfrom functools import partial\n\nimport boto3\nimport numpy as np\nimport rasterio\nfrom PIL import Image, ImageDraw\nfrom sat_giffer import settings\nfrom rasterio import transform\nfrom rasterio.session import AWSSession\nfrom rasterio.vrt import WarpedVRT\nfrom rasterio.warp import calculate_default_transform, Resampling\n\nsession = rasterio.Env(\n AWSSession(aws_access_key_id=settings.AWS_KEY, aws_secret_access_key=settings.AWS_SECRET)) if 'test' not in \\\n sys.argv[0] else None\nMAX_WORKERS = 2\n\n\ndef get_cropped_data_from_bucket(band, key, bounds, vrt_params, out_crs):\n \"\"\"\n Recovered the data for a given band for a given scene\n :param band: Number of the band of interest\n :param key: Tile location on AWS\n :param 
bounds: bounding box of the area of interest\n :param vrt_params: meta dictionary for resulting file\n :param out_crs: output coordinate system\n :return: the cropped data from the band for the bounds\n \"\"\"\n f = key + 'B0%s.jp2' % band\n with session:\n with rasterio.open(f) as src:\n logging.info('Getting data for file {f}'.format(f=f))\n vrt_transform, vrt_width, vrt_height = get_vrt_transform(src, bounds, bounds_crs=out_crs)\n vrt_width = round(vrt_width)\n vrt_height = round(vrt_height)\n vrt_params.update(\n dict(transform=vrt_transform, width=vrt_width, height=vrt_height)\n )\n with WarpedVRT(src, **vrt_params) as vrt:\n logging.info('Getting data from bucket for {f}'.format(f=f))\n data = vrt.read(\n out_shape=(1, vrt_height, vrt_width),\n resampling=Resampling.bilinear,\n indexes=[1],\n )\n gc.collect()\n return data\n\n\ndef rgb_for_key(key, bounds=None, vrt_params=None, out_crs=None):\n \"\"\"\n Loops over Blue, Green and Red Sentinel bands to build a color image\n :param key:\n :param bounds:\n :param vrt_params:\n :param out_crs:\n :return:\n \"\"\"\n bands = ['2', '3', '4']\n logging.info('Getting data for key {key}'.format(key=key))\n _worker = partial(get_cropped_data_from_bucket, key=key, bounds=bounds, vrt_params=vrt_params, out_crs=out_crs)\n with futures.ProcessPoolExecutor(max_workers=3) as executor:\n try:\n data = np.concatenate(list(executor.map(_worker, bands)))\n except:\n return\n gc.collect()\n reshaped_data = np.zeros((data.shape[1], data.shape[2], data.shape[0]))\n for i in range(3):\n reshaped_data[:, :, abs(i - 2)] = data[i, :, :]\n return reshaped_data\n\n\ndef get_vrt_transform(src, bounds, bounds_crs='epsg:3857'):\n \"\"\"Calculate VRT transform.\n Attributes\n ----------\n src : rasterio.io.DatasetReader\n Rasterio io.DatasetReader object\n bounds : list\n Bounds (left, bottom, right, top)\n bounds_crs : str\n Coordinate reference system string (default \"epsg:3857\")\n Returns\n -------\n vrt_transform: Affine\n Output affine transformation matrix\n vrt_width, vrt_height: int\n Output dimensions\n \"\"\"\n dst_transform, _, _ = calculate_default_transform(src.crs,\n bounds_crs,\n src.width,\n src.height,\n *src.bounds)\n w, s, e, n = bounds\n vrt_width = math.ceil((e - w) / dst_transform.a)\n vrt_height = math.ceil((s - n) / dst_transform.e)\n\n vrt_transform = transform.from_bounds(w, s, e, n, vrt_width, vrt_height)\n\n return vrt_transform, vrt_width, vrt_height\n\n\ndef get_utm_srid(lat, lon):\n \"\"\"\n Calculate which utm zone the AOI should fall into\n :param lat: Latitude in WGS84\n :param lon: Longitude in WGS84\n :return: Integer EPSG code\n \"\"\"\n return int(32700 - round((45 + lat) / 90, 0) * 100 + round((183 + lon) / 6, 0))\n\n\ndef make_gif(keys, data, toa):\n \"\"\"\n Combine the data into a single array\n :param keys: Location of the tiles on AWS\n :param data: The image arrays\n :param toa: toa True/False\n :return: Data with dates embedded on the gif\n \"\"\"\n drawn = []\n for fn, i in zip(keys, data):\n if i is None:\n continue\n if len(np.where(i[:, :, 2] == 0)[0]) > i[:, :, 2].size * 0.8:\n continue\n if len(np.where(i[:, :, 2] > 2000)[0]) < i[:, :, 2].size * 0.2:\n i = np.hstack((np.zeros((i.shape[0], 100, 3)), i))\n im = Image.fromarray(np.clip((i * 255 / 2000), 0, 255).astype(np.uint8))\n draw = ImageDraw.Draw(im)\n if toa:\n draw.text((20, 50), '%s' % '-'.join(fn.split('/')[-5:-2]), fill=(255, 255, 255, 255))\n else:\n draw.text((20, 50), '%s' % '-'.join(fn.split('/')[-6:-3]), fill=(255, 255, 255, 
255))\n drawn.append(np.array(im))\n return drawn\n\n\ndef upload_file_to_s3(body):\n \"\"\"\n Uploads a given file to s3\n :param body: filename\n :return: None\n \"\"\"\n s3_client = boto3.Session(settings.AWS_KEY, settings.AWS_SECRET).client('s3', region_name='eu-central-1')\n s3_client.upload_file(Filename='gifs/%s.gif' % body, Bucket='sat-giffer', Key='gifs/%s.gif' % body,\n ExtraArgs={'ACL': 'public-read'})\n\n\ndef get_s3_urls(first_tile, search_results, toa):\n \"\"\"\n Get a filtered list of S3 URIs given a tile id and search results\n :param first_tile: first tile to appear in the search\n :param search_results: full results of the search\n :param toa: whether to attempt to retrieve toa/boa data\n :return: list of s3 URIs\n \"\"\"\n if toa:\n keys = [i['properties']['s3URI'] for i in search_results if\n first_tile in i['properties']['s3URI']]\n else:\n keys = [i['properties']['s3URI'].replace('l1c', 'l2a') + 'R10m/' for i in search_results if\n first_tile in i['properties']['s3URI']]\n return keys\n\n\ndef get_data_for_keys(bounds, keys, out_crs, vrt_params):\n \"\"\"\n Get RGB data from AWS given a list of keys\n :param bounds: bounding box of AOI\n :param keys: List of S3 URIS\n :param out_crs: output crs\n :param vrt_params: params for transformation\n :return: the data array\n \"\"\"\n with futures.ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:\n _worker = partial(rgb_for_key, bounds=bounds, vrt_params=vrt_params, out_crs=out_crs)\n data = list(executor.map(_worker, keys))\n gc.collect()\n return data\n","repo_name":"JamesOConnor/sat_giffer","sub_path":"src/giffer.py","file_name":"giffer.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"36477757734","text":"from baconlang.baconlang_syntax_error import BACONLangSyntaxError\n\n\ndef parse(raw):\n symbols = [\n symbol.strip()\n for symbol in raw.replace(\"[\", \"[,\").replace(\"]\", \",]\").split(\",\")\n ]\n\n for idx, symbol in enumerate(symbols):\n if symbol == \"[\" or symbol == \"]\":\n continue\n\n if len(symbol) and symbol[0] == '\"' and symbol[-1] == '\"':\n symbols[idx] = symbol[1:-1]\n\n else:\n # Only valid strings are legal symbols\n raise BACONLangSyntaxError(symbol)\n\n return symbols\n","repo_name":"baconlang/python","sub_path":"baconlang/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"3945605319","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name=\"UniversityInfo\"\n\nurlpatterns = [\n\n url(r'^$', views.DepartmentList,name=\"dept_list\"),\n url(r'^about/$', views.About,name=\"about\"),\n url(r'^contact/$', views.Contact,name=\"contact\"),\n url(r'^department/(?P[\\w-]+)/$', views.DepartmentDetail,name=\"dept_detail\"),\n url(r'^department/(?P[\\w-]+)/(?P[\\w-]+)/$', views.StudentList,name=\"student_list\"),\n url(r'^department/(?P[\\w-]+)/(?P[\\w-]+)/(?P[\\w-]+)/$', views.StudentDetail,name=\"student_detail\"),\n]\n","repo_name":"raselcse07/University","sub_path":"src/UniversityInfo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28129676287","text":"import numpy as np\n\n\ndef getTV_unit_test(shufIdx, X, y, Xp, yp) :\n\tidx = np.argwhere(shufIdx == 100)\n\tidx = idx.item()\n\tassert(np.array_equal(X[idx], Xp[100]))\n\tassert(np.array_equal(y[idx], yp[100]))\n\ndef getTrainValidSet(X, y, r) :\n # X should have size [dataNumximgSizeximgSizeximgChan]\n # y should have size [dataNumxnb_class]\n assert(X.shape[0] == y.shape[0])\n # shufIdx = np.random.permutation(X.shape[0])\n shufIdx = np.arange(X.shape[0])\n np.random.shuffle(shufIdx)\n Xp = X\n yp = y\n X = X[shufIdx]\n y = y[shufIdx]\n # getTV_unit_test(shufIdx, X, y, Xp, yp)\n\n mid= int(np.floor(X.shape[0]*(1.-r)))\n # Return values: X_train, X_valid, y_train, y_valid\n return (X[0:mid], X[mid:], y[0:mid], y[mid:])\n\ndef parsePara(paraFile=\"./paras\") :\n\tpara = dict()\n\twith open(paraFile, \"r\") as pf :\n\t\tfor l in pf :\n\t\t\t# Remove white space\n\t\t\tl = l.replace(\" \", \"\")\n\t\t\t# Remove comments\n\t\t\tcont = l.split(\"#\")\n\n\t\t\tif cont[0] != \"\" :\n\t\t\t\tcontArr = cont[0].split(\"=\")\n\t\t\t\tnum = contArr[1]\n\t\t\t\tnum = float(num) if \".\" in num else int(num)\n\t\t\t\tpara[contArr[0]] = num\n\n\treturn para\n","repo_name":"andrewccchan/ML2016","sub_path":"hw3/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44479302615","text":"import sqlite3\n\n\ndef cursor_and_conn_to_db():\n tournament_table = \"pokemon_tournament.db\"\n conn = sqlite3.connect(tournament_table)\n cursor = conn.cursor()\n return cursor, conn\n\n\ndef close_conn(conn):\n conn.commit()\n conn.close()\n\n\ndef create_tables():\n cursor, conn = cursor_and_conn_to_db()\n\n # Create the Players table if it doesn't exist\n cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS Players (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\"\n )\n\n cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS Tournaments (\n id INTEGER PRIMARY KEY,\n created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\"\n )\n\n cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS Rounds (\n round_id INTEGER PRIMARY KEY,\n tournament_id INTEGER,\n player1_id INTEGER,\n player2_id INTEGER,\n winner_id INTEGER,\n loser_id INTEGER,\n round_number INTEGER,\n round_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n FOREIGN KEY (tournament_id) REFERENCES Tournaments (id),\n FOREIGN KEY (player1_id) REFERENCES Players (id),\n FOREIGN KEY (player2_id) REFERENCES Players (id),\n FOREIGN KEY (winner_id) REFERENCES Players (id),\n FOREIGN KEY (loser_id) REFERENCES Players (id)\n )\n 
\"\"\"\n )\n\n close_conn(conn)\n\n\ndef get_player_name(player_id):\n cursor, conn = cursor_and_conn_to_db()\n player = cursor.execute(\n \"SELECT name FROM Players WHERE id = ?\", (player_id,)\n ).fetchone()\n player_name = player[0] if player else None\n\n close_conn(conn)\n return player_name\n\n\ndef add_players(players: list):\n cursor, conn = cursor_and_conn_to_db()\n\n for name in players:\n # Check if the player already exists\n cursor.execute(\"SELECT id FROM Players WHERE name = ?\", (name,))\n existing_player = cursor.fetchone()\n\n if not existing_player:\n cursor.execute(\"INSERT INTO Players (name) VALUES (?)\", (name,))\n\n close_conn(conn)\n\n\ndef init_db():\n create_tables()\n\n players = [\"Nathan\", \"Angelina\", \"Gma V\", \"Gwen\", \"Toby\", \"Louis\"]\n add_players(players)\n\n\ndef get_player_stats(player_id):\n cursor, conn = cursor_and_conn_to_db()\n cursor.execute(\n \"\"\"\n SELECT\n wins,\n losses,\n CASE\n WHEN wins = 0 AND losses = 0 THEN 0\n WHEN losses = 0 THEN 100\n ELSE CAST(wins AS FLOAT) / (wins + losses) * 100\n END AS win_loss_ratio\n FROM (\n SELECT\n IFNULL(COUNT(CASE WHEN winner_id = ? THEN 1 END), 0) AS wins,\n IFNULL(COUNT(CASE WHEN loser_id = ? THEN 1 END), 0) AS losses\n FROM Rounds\n WHERE (player1_id = ? OR player2_id = ?)\n AND (winner_id IS NOT NULL OR loser_id IS NOT NULL)\n AND tournament_id IS NOT NULL\n )\n \"\"\",\n (player_id, player_id, player_id, player_id),\n )\n\n wins, losses, win_loss_ratio = cursor.fetchone()\n close_conn(conn)\n return wins, losses, win_loss_ratio\n\n\ndef get_players():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"SELECT id, name FROM Players\")\n players = cursor.fetchall()\n\n close_conn(conn)\n return players\n\n\ndef print_all_tournaments():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"SELECT * FROM Tournaments\")\n tournaments = cursor.fetchall()\n\n print(\"List of all tournaments:\")\n for tournament_info in tournaments:\n print(tournament_info)\n\n close_conn(conn)\n\n\ndef create_tournament_and_return_id():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"INSERT INTO Tournaments DEFAULT VALUES\")\n tournament_id = cursor.lastrowid\n\n close_conn(conn)\n\n return tournament_id\n\n\ndef get_tournament_rankings(tournament_id):\n cursor, conn = cursor_and_conn_to_db()\n cursor.execute(\n \"\"\"\n SELECT\n p.name,\n IFNULL(COUNT(CASE WHEN r.winner_id = p.id THEN 1 END), 0) AS wins,\n IFNULL(COUNT(CASE WHEN r.loser_id = p.id THEN 1 END), 0) AS losses,\n CASE\n WHEN IFNULL(COUNT(CASE WHEN r.loser_id = p.id THEN 1 END), 0) = 0 THEN 0\n ELSE IFNULL(COUNT(CASE WHEN r.winner_id = p.id THEN 1 END), 0) / IFNULL(COUNT(CASE WHEN r.loser_id = p.id THEN 1 END), 0)\n END AS win_loss_ratio\n FROM Players p\n LEFT JOIN Rounds r ON p.id = r.winner_id OR p.id = r.loser_id\n WHERE r.tournament_id = ?\n GROUP BY p.id, p.name\n ORDER BY wins DESC, losses ASC\n \"\"\",\n (tournament_id,),\n )\n\n rankings = []\n for row in cursor.fetchall():\n player_name, wins, losses, win_loss_ratio = row\n rankings.append((player_name, wins, losses, win_loss_ratio))\n\n close_conn(conn)\n return rankings\n\n\ndef get_rounds_without_winner():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\n \"\"\"\n SELECT round_id, round_number, player1_id, player2_id\n FROM Rounds\n WHERE winner_id IS NULL\n \"\"\"\n )\n\n rounds = cursor.fetchall()\n\n close_conn(conn)\n return rounds\n\n\ndef create_round(tournament_id, round_number, player1_id, player2_id):\n cursor, conn = 
cursor_and_conn_to_db()\n cursor.execute(\n \"\"\"\n INSERT INTO Rounds (tournament_id, round_number, player1_id, player2_id)\n VALUES (?, ?, ?, ?)\n \"\"\",\n (tournament_id, round_number, player1_id, player2_id),\n )\n\n close_conn(conn)\n\n\ndef update_winner(round_id, winner_id, loser_id):\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\n \"UPDATE Rounds SET winner_id = ?, loser_id = ? WHERE round_id = ?\",\n (winner_id, loser_id, round_id),\n )\n\n close_conn(conn)\n\n\ndef print_all_rounds():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"SELECT * FROM Rounds\")\n rounds = cursor.fetchall()\n\n print(\"List of all rounds:\")\n for round_info in rounds:\n print(round_info)\n\n close_conn(conn)\n\n\ndef debug():\n print(\"Rounds\")\n print_all_rounds()\n print(\"Tournaments\")\n print_all_tournaments()\n","repo_name":"GearsandKeys/Tournament_CLI","sub_path":"data_access.py","file_name":"data_access.py","file_ext":"py","file_size_in_byte":6448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"44250012387","text":"import subprocess\nimport openai\nfrom dotenv import load_dotenv\nimport ast\nimport sys\nimport re\nimport os\n\n# Load the OpenAI API key from the .env file\nload_dotenv()\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n# Function to generate code using GPT-3.5-turbo\ndef generate_code(prompt):\n # Create a chat completion with the OpenAI API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a professional full stack developer.\"},\n {\"role\": \"user\", \"content\": prompt},\n ]\n )\n # Return the content of the response\n return response['choices'][0]['message']['content']\n\n# Function to validate generated code\ndef validate_code(code):\n try:\n # Parse the code using ast.parse to validate its syntax\n ast.parse(code)\n return True\n except SyntaxError as e:\n print(f\"Syntax error in generated code: {e}\")\n if not fix_code(code, e):\n return False\n return validate_code(code)\n\n\"\"\"\nGiven a piece of code and an error message, this function extracts the line number from the error message,\nconstructs a prompt for ChatGPT with the code and the error message, sends the prompt to ChatGPT, and extracts\nthe fixed code from the response. 
It then checks if the fixed code is valid by compiling it, and returns the\nfixed code if it is valid, or False otherwise.\n:param code: A string representing the code to be fixed.\n:param error: An error message indicating the error in the code.\n:return: Either a string representing the fixed code, or False.\n\"\"\"\ndef fix_code(code, error):\n # Extract line number from error message\n line_num = int(re.search(r\"line (\\d+)\", str(error)).group(1))\n\n # Construct prompt for ChatGPT\n prompt = f\"Fix the error in line {line_num}: {error}\\nCode:\\n{code}\"\n\n # Send prompt to ChatGPT\n response = openai.Completion.create(\n engine=\"davinci-codex\",\n prompt=prompt,\n max_tokens=1024,\n n=1,\n stop=None,\n temperature=0.8,\n )\n\n # Extract fixed code from response\n fixed_code = response.choices[0].text.strip()\n\n # Check if the fixed code is valid\n try:\n compile(fixed_code, \"\", \"exec\")\n except SyntaxError:\n return False\n\n # Return fixed code\n return fixed_code\n\n\n\n# Function to execute the generated code\ndef execute_code(file_name):\n try:\n # Execute the code with subprocess.check_output\n output = subprocess.check_output([sys.executable, file_name], stderr=subprocess.STDOUT)\n return output\n except subprocess.CalledProcessError as e:\n error_message = e.output.decode()\n print(f\"An error occurred while executing the code: {error_message}\")\n sys.exit(1)\n\n# Function to create a Python file and write the generated code to it\ndef create_and_run_file(code):\n file_name = input(\"Enter the name of the Python file you'd like to create (e.g., script.py): \")\n with open(file_name, 'w') as f:\n f.write(code)\n print(f\"Created file '{file_name}'\")\n return file_name\n\n# Main function\ndef main():\n # Print the contents of the current directory\n print(\"Current directory contents:\")\n for file_name in os.listdir():\n print(f\" {file_name}\")\n \n # Ask the user for their initial prompt\n initial_prompt = input(\"Please enter your initial request for the script to create and run: \")\n \n # Generate the code\n code = generate_code(initial_prompt)\n\n # Print the generated code\n print(\"Generated Code:\")\n print(\"----------------\")\n print(code)\n print(\"----------------\")\n\n # Validate the generated code\n if not validate_code(code):\n print(\"Code validation failed.\")\n sys.exit(1)\n\n # Ask the user for approval to execute the code\n approval = input(\"Do you approve execution of this code? (yes/no): \")\n\n if approval.lower() == \"yes\":\n # Create a file with the generated code\n file_name = create_and_run_file(code)\n # Execute the generated code\n result = execute_code(file_name)\n # Print the output of the code execution\n print('Output:', result.decode())\n else:\n print(\"Execution cancelled.\")\n\n# Run the main function\nif __name__ == \"__main__\":\n main()\n","repo_name":"TheSnowGuru/InterpreterGPT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"37795083122","text":"#Python Exercise 082: Create a program that will read several numbers and put them into a list. After that, create two extra lists that will hold only the even values and the odd values entered, respectively. At the end, show the contents of the three generated lists.\n\n\noriginal_list = []\nwhile True:\n try:\n original_list.append(int(input('Enter a valid number: ')))\n except:\n print('Invalid character typed. 
Please, try again!')\n continue\n option = ' '\n while option not in 'YyNn':\n option = str(input('Would you like to continue? [Y/N]: '))\n if option in 'Nn':\n break\neven_values = []\nodd_values = []\nfor i, v in enumerate(original_list):\n if v % 2 == 0:\n even_values.append(v)\n elif v % 2 == 1:\n odd_values.append(v)\nprint(original_list)\nprint(even_values)\nprint(odd_values)","repo_name":"Matheusfarmaceutico/Exercicios-Python","sub_path":"Exercícios do Guanabara sendo refeitos em 2022/Revisaoguanabara/ex82.py","file_name":"ex82.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42696432547","text":"#####\n# ogo_fe_pistoia_tabulate.py\n#\n# This script collects all the FE Pistoia results from the specified directory.\n#\n#####\n#\n# Andrew Michalski\n# University of Calgary\n# Biomedical Engineering Graduate Program\n# September 6, 2019\n# Modified to Py3: March 25, 2020\n#####\n\nscript_version = 1.0\n\n##\n# Import the required modules\nimport os\nimport sys\nimport glob\nimport argparse\nimport time\nimport pandas as pd\nfrom datetime import date\nfrom collections import OrderedDict\n\n\nfrom ogo.util.echo_arguments import echo_arguments\n\n##\ndef fePistoiaTab(args):\n\n parser = argparse.ArgumentParser(\n description=\"\"\"This script collects all the FE Pistoia results from the specified directory. INPUT: Data_Directory; OUTPUT: TXT file of results\"\"\")\n\n parser.add_argument(\"analysis_directory\",\n help = \"Path/To/Data/Directory\")\n\n parser.add_argument(\"--model\", type = int,\n default = 1,\n help = \"Set the bone value to analyze. 1 = RT_FEMUR_SF;\\n2 = RT_FEMUR_SLS;\\n3 = LT_FEMUR_SF;\\n4 = LT_FEMUR_SLS;\\n6 = L4_SPINE_VC.\\n(Default: %(default)s)\")\n\n ##\n # Collect the input arguments\n args = parser.parse_args()\n directory = args.analysis_directory\n model = args.model\n\n ##\n # Read the files in the Directory\n files = []\n\n if model == 1:\n model_filename ='*_RT_FEMUR_SF_PISTOIA.txt'\n model_filename2 ='_RT_FEMUR_SF_PISTOIA.txt'\n fileName = \"RT_FEMUR_SF_PISTOIA_Results.txt\"\n model_type = 'RT FEMUR SF Failure Load (N)'\n elif model == 2:\n model_filename = '*_RT_FEMUR_SLS_PISTOIA.txt'\n model_filename2 = '_RT_FEMUR_SLS_PISTOIA.txt'\n fileName = \"RT_FEMUR_SLS_PISTOIA_Results.txt\"\n model_type = 'RT FEMUR SLS Failure Load (N)'\n elif model == 3:\n model_filename ='*_LT_FEMUR_SF_PISTOIA.txt'\n model_filename2 ='_LT_FEMUR_SF_PISTOIA.txt'\n fileName = \"LT_FEMUR_SF_PISTOIA_Results.txt\"\n model_type = 'LT FEMUR SF Failure Load (N)'\n elif model == 4:\n model_filename = '*_LT_FEMUR_SLS_PISTOIA.txt'\n model_filename2 = '_LT_FEMUR_SLS_PISTOIA.txt'\n fileName = \"LT_FEMUR_SLS_PISTOIA_Results.txt\"\n model_type = 'LT FEMUR SLS Failure Load (N)'\n elif model == 6:\n model_filename = '*_L4_FE_PISTOIA.txt'\n model_filename2 = '_L4_FE_PISTOIA.txt'\n fileName = \"L4_SPINE_FE_PISTOIA_Results.txt\"\n model_type = 'L4 SPINE Failure Load (N)'\n else:\n print(\"Model value set is not defined. 
Ending script.\")\n sys.exit()\n\n ##\n # Create dataframe for output\n df = pd.DataFrame(columns = ['ID', model_type])\n\n ##\n # Read and extract the data\n os.chdir(directory)\n files = sorted(glob.glob(model_filename))\n k = 0\n for i in files:\n ID = i.replace(model_filename2, \"\")\n\n # Check to see if the file is empty and add to table if needed\n if os.stat(i).st_size == 0:\n x_fl = \"\"\n y_fl = \"\"\n z_fl = \"\"\n if model == 1:\n df.loc[k] = [ID, y_fl]\n if model == 2:\n df.loc[k] = [ID, z_fl]\n if model == 3:\n df.loc[k] = [ID, y_fl]\n if model == 4:\n df.loc[k] = [ID, z_fl]\n if model == 6:\n df.loc[k] = [ID, z_fl]\n k = k + 1\n continue\n\n # open each txt file and extract the failure loads\n lines = [line.rstrip('\\n') for line in open(i)]\n lines = lines[11].replace(\" \", \"\")\n lines = lines.replace(\"Failureload(RF*factor):\", \"\")\n index_1 = lines.find(\"E\")\n index_2 = lines.find(\"E\", index_1 + 1)\n index_3 = lines.find(\"E\", index_2 + 1)\n indices = [index_1, index_2, index_3]\n x_fl = lines[0:index_1+4]\n y_fl = lines[index_1+4:index_2+4]\n z_fl = lines[index_2+4:]\n\n\n if model == 1:\n df.loc[k] = [ID, y_fl]\n if model == 2:\n df.loc[k] = [ID, z_fl]\n if model == 3:\n df.loc[k] = [ID, y_fl]\n if model == 4:\n df.loc[k] = [ID, z_fl]\n if model == 6:\n df.loc[k] = [ID, z_fl]\n k = k + 1\n\n ##\n # Write Output TXT file of dataframe\n df.to_csv(fileName, sep = '\\t', index = False, header = True)\n\n print(\"Script Complete.\")\n\n\ndef main():\n description = '''\n This script collects all the FE Pistoia results from the specified directory. \n \n INPUT: Data_Directory; \n OUTPUT: TXT file of results\n \n '''\n\n\n # Setup argument parsing\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n prog=\"ogoFePistoiaTabulate\",\n description=description\n )\n\n parser.add_argument(\"analysis_directory\",\n help = \"Path/To/Data/Directory\")\n\n parser.add_argument(\"--model\", type = int,\n default = 1,\n help = \"Set the bone value to analyze. 
1 = RT_FEMUR_SF;\n2 = RT_FEMUR_SLS;\n3 = LT_FEMUR_SF;\n4 = LT_FEMUR_SLS;\n6 = L4_SPINE_VC.\n(Default: %(default)s)\")\n\n\n # Parse and display\n args = parser.parse_args()\n \n print(echo_arguments('fe_Pistoia_Tabulate', vars(args)))\n\n # Run program\n fePistoiaTab(args)\n\nif __name__ == '__main__':\n main()","repo_name":"Bonelab/Ogo","sub_path":"ogo/cli/ref/FePistoiaTabulate.py","file_name":"FePistoiaTabulate.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"36779879770","text":"import math\nfrom Algorithms.valManip import *\n\nclass Search(object):\n \"\"\"binary search of an array\"\"\"\n\n def bSearchAnimeList(list, animeName=None, anime_id=None):\n\n if(animeName is not None):\n \n target = valManip.makeCompareable(animeName)\n \n\n return Search.linearSearch(list, animeName=target)\n\n elif(anime_id is not None):\n\n start = 0\n middle = math.floor(len(list['entries'])/2)\n end = len(list['entries']) - 1\n counter = 0\n\n target = anime_id\n\n while(True):\n middle = (start + end) // 2\n midpoint = list['entries'][middle]['media']['id']\n\n if(midpoint < target):\n start = middle + 1\n elif(midpoint > target):\n end = middle - 1\n else:\n return middle\n counter += 1\n\n if(counter >= len(list['entries'])): #performs a linear search if binary search is not working\n return Search.linearSearch(list, anime_id=target)\n\n pass\n\n def linearSearch(list, animeName=None, anime_id=None):\n \n listLen = len(list['entries'])\n\n if(animeName is not None):\n\n for x in range(0, listLen):\n listVal = list['entries'][x]['media']['title']['userPreferred']\n\n if(valManip.makeCompareable(listVal) == valManip.makeCompareable(animeName)): #returns index if value is found\n #print(\"OMG THE BINARY SEARCH BROKE!!!\")\n return list['entries'][x]\n\n elif(anime_id is not None):\n \n for x in range(0, listLen):\n listVal = list['entries'][x]['media']['id']\n\n if(listVal == anime_id): #returns index if value is found\n #print(\"OMG THE BINARY SEARCH BROKE!!!\")\n return list['entries'][x]\n \n return None\n","repo_name":"backedman/animeScores","sub_path":"Algorithms/Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"22549548431","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/6/15 9:20\n# @Author : Saiterlz from lanzhou\n# @Email : kinekok@163.com\n# @File : create_nameTBC.py\n# @Software: PyCharm\ndef replaceFomat(text: str, word: str, n: int, reverse=False):\n \"\"\"\n Replace the given word in the text with a formatted placeholder, or restore it\n :param text: the text to operate on\n :param word: the target word\n :param n: the index number of the target word\n :param reverse: whether to restore instead of replace\n :return: the text after replacement\n \"\"\"\n # Build the intermediate placeholder\n new_text = text[:]\n fmt = \"<{}>\".format(n)\n # Replace\n if reverse is False:\n new_text = new_text.replace(word, fmt) # formatted replacement\n return new_text\n # Restore\n elif reverse is True:\n new_text = new_text.replace(fmt, word) # undo the formatted replacement\n return new_text\n # Invalid request, raise an exception\n else:\n raise TypeError\n\n\ndef replaceMulti(text: str, olds: list, news: list):\n \"\"\"\n Replace multiple groups of strings at once\n :param text: the text to operate on\n :param olds: list of old strings\n :param news: list of new strings\n :return: the text after replacement\n \"\"\"\n if len(olds) != len(news):\n raise IndexError\n else:\n new_text = text[:]\n # Formatted replacement\n i = 0 # word counter\n for word in olds:\n i += 1\n new_text = replaceFomat(new_text, word, i)\n # Restore from the formatted placeholders\n i = 0 # reset to zero\n for word in news:\n i += 1\n new_text = replaceFomat(new_text, word, i, True)\n # Return the fully replaced text\n 
return new_text\n\n\ndef test2(strtext):\n temp = strtext.strip()\n olds = ['\"', \",\", \"[\", \"]\"]\n news = [\"\", \"\", \"\", \"\"]\n result = replaceMulti(temp, olds, news)\n strfull = result.split('=')\n print(strfull)\n strFormat = strfull[1].strip() + ':' + strfull[0].strip() +'\\n'\n return strFormat\n\n\nadd_txt = []\nwith open('nameTBCold.txt', mode='r', encoding='utf-8') as f:\n for i in f.readlines():\n print(i)\n with open('nameTBC2.txt', mode='a+', encoding='utf-8') as h:\n h.write(test2(i))\n","repo_name":"saiterlz/TSM_Export_Excel-master","sub_path":"create_nameTBC.py","file_name":"create_nameTBC.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30999973543","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport cv2\nimport numpy as np\n\n\n# In[2]:\n\n\nimg1 = np.zeros((300, 300), dtype=\"uint8\")\ncv2.rectangle(img1, (100, 100), (250, 250), 255, -1)\ncv2.imshow(\"pic.jpg\", img1)\n\n\n# In[3]:\n\n\nimg2 = np.zeros((300, 300), dtype=\"uint8\")\ncv2.circle(img2, (150, 150), 90, 255, -1)\ncv2.imshow(\"car.webp\", img2)\n \n\n\n# In[4]:\n\n\nrect_and_circle = cv2.bitwise_and(img1,img2)\ncv2.imshow(\"AND operation\",rect_and_circle)\n \n\n\n# In[5]:\n\n\nrect_or_circle = cv2.bitwise_or(img1,img2)\ncv2.imshow(\"OR operation\",rect_or_circle)\n\n\n# In[6]:\n\n\nrect_xor_circle = cv2.bitwise_xor(img1,img2)\ncv2.imshow(\"XOR Operation\",rect_xor_circle)\n\n\n# In[7]:\n\n\nrect_xor_circle2 = cv2.bitwise_xor(img1,img2)\ncv2.imshow(\"XOR Operation\",rect_xor_circle2)\n\n\n# In[8]:\n\n\nrect_xor_circle2 = cv2.bitwise_xor(img1,img2)\ncv2.imshow(\"NOT Operation\",rect_xor_circle)\n\n\n# In[9]:\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"harishnk443/image-processing","sub_path":"BITWISE OPERATION.py","file_name":"BITWISE OPERATION.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42546163700","text":"\n# imports Adventure() class from respective file\nfrom miniA5Adventurer import Adventurer\n\n# main execution outlines the program route by calling default values.\n# A name is given and health is increased and the new values are displayed.\n\n\ndef start():\n\n myAdventurer = Adventurer()\n\n print(\"The default values are:\", myAdventurer.name, myAdventurer.health)\n\n myAdventurer.name = \"Balin\"\n\n myAdventurer.gainLevel()\n\n print(\"New values are:\", myAdventurer.name, myAdventurer.health)\n\n\nstart()\n","repo_name":"ZilRahman/PythonCourse","sub_path":"A5/miniA5Contents/miniA5Start.py","file_name":"miniA5Start.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71836374649","text":"from matplotlib import transforms\nimport numpy as np\nimport torch \nfrom torchvision import transforms, datasets\nfrom torch.utils.data import random_split\nimport matplotlib.pyplot as plt\nfrom torchvision.datasets import ImageFolder\nimport torch.nn as nn\nimport torch.nn.functional as F \n\n\nclass Shift_Net(nn.Module):\n def __init__(self, pars):\n super(Shift_Net, self).__init__()\n ks=(5,5)\n ps=np.int32(5)\n self.mid_layer=256\n # Two successive convolutional layers.\n # Two pooling layers that come after convolutional layers.\n # Two dropout layers.\n self.conv1 = nn.Conv2d(3, 32, kernel_size=ks[1],padding='same')\n self.pool1= 
nn.MaxPool2d(kernel_size=10,stride=2)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=ks[1],padding='same')\n self.drop2 = nn.Dropout2d(p=0.2)\n self.pool2=nn.MaxPool2d(kernel_size=5,stride=2)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=ks[1],padding='same')\n self.pool3=nn.MaxPool2d(kernel_size=2,stride=3)\n self.drop_final=nn.Dropout(p=0.2)\n self.total_pars = 0\n\n self.first=True\n if self.first:\n self.forward(torch.zeros((1,)+pars.inp_dim))\n \n self.optimizer = torch.optim.SGD(self.parameters(), lr = 0.001)\n self.criterion=nn.CrossEntropyLoss()\n \n def forward(self, x):\n x = self.conv1(x)\n \n # Apply relu to a pooled conv1 layer.\n x = F.relu(self.pool1(x))\n # Apply relu to a pooled conv2 layer with a drop layer inbetween.\n x = self.drop2(F.relu(self.pool2(self.conv2(x))))\n x = F.relu(self.pool3(self.conv3(x)))\n if self.first:\n self.first=False\n self.inp=x.shape[1]*x.shape[2]*x.shape[3]\n # Compute dimension of output of x and setup a fully connected layer with that input dim \n # pars.mid_layer output dim. Then setup final 3 node output layer.\n print('input dimension to fc1',self.inp)\n if self.mid_layer is not None:\n self.fc1 = nn.Linear(self.inp, self.mid_layer)\n self.fc_final = nn.Linear(self.mid_layer, 3)\n else:\n self.fc1=nn.Identity()\n self.fc_final = nn.Linear(self.inp, 3)\n x = x.reshape(-1, self.inp)\n x = self.fc1(x)\n x = self.fc_final(x)\n return x\n \n # Run the network on the data, compute the loss, compute the predictions and compute classification rate/\n def get_acc_and_loss(self, data, targ):\n output = self.forward(data)\n loss = self.criterion(output, targ)\n pred = torch.max(output,1)[1]\n #print(f'Prediction is {pred}')\n correct = torch.eq(pred,targ).sum()\n \n return loss,correct\n \n # Compute classification and loss and then do a gradient step on the loss.\n def run_grad(self,data,targ):\n self.optimizer.zero_grad()\n loss, correct=self.get_acc_and_loss(data,targ)\n loss.backward()\n self.optimizer.step()\n \n return loss, correct\n\n\n# An object containing the relevant parameters for running the experiment.\nclass par(object):\n def __init__(self):\n self.batch_size=1000\n self.step_size=.001\n self.num_epochs=20\n self.numtrain=55000\n self.minimizer=\"Adam\"\n self.data_set=\"mnist\"\n self.model_name=\"model\"\n self.dropout=0.\n self.dim=32\n self.pool_size=2\n self.kernel_size=5\n self.mid_layer=256\n self.use_gpu=False\n ","repo_name":"jsjung00/rps","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21297996340","text":"\"\"\"\nMR-FSK Modulator\n\"\"\"\n\nimport numpy as np\nfrom enum import Enum\nfrom ..tools.bits import from_bitstring, to_binary_array, check_binary_array\nfrom ..tools import operations\n\nfrom colorama import Fore\n\n\nclass Modulation(Enum):\n FSK2 = 1\n FSK4 = 2\n\n\n# SFD Values as a function of modulation, phyMRFSKSFD and coded/uncoded\n# Key is (modulation, phyMRFSKSFD, is_coded)\n# See tables 131 and 132 of 802.15.4g-2012\nSFD = {\n # Table 131\n (Modulation.FSK2, 0, True): from_bitstring('0110 1111 0100 1110'),\n (Modulation.FSK2, 0, False): from_bitstring('1001 0000 0100 1110'),\n (Modulation.FSK2, 1, True): from_bitstring('0110 0011 0010 1101'),\n (Modulation.FSK2, 1, False): from_bitstring('0111 1010 0000 1110'),\n # Table 132\n (Modulation.FSK4, 0, True): from_bitstring('0111 1101 1111 1111 0111 0101 1111 1101'),\n (Modulation.FSK4, 0, False): 
from_bitstring('1101 0111 0101 0101 0111 0101 1111 1101'),\n (Modulation.FSK4, 1, True): from_bitstring('0111 1101 0101 1111 0101 1101 1111 0111'),\n (Modulation.FSK4, 1, False): from_bitstring('0111 1111 1101 1101 0101 0101 1111 1101'),\n}\n\n# preamble field as a function of modulation\nPREAMBLE_SEQUENCE = {\n Modulation.FSK2: np.tile(np.array([0, 1]), 4),\n Modulation.FSK4: np.tile(np.array([0, 1, 1, 1]), 4)\n}\n\nPHR_LENGTH = 2\n\n# Tail bits based on the memory state of the RSC encoder\nRSC_TAIL_BITS = {\n 0b000: np.array([0, 0, 0]),\n 0b001: np.array([1, 0, 0]),\n 0b010: np.array([1, 1, 0]),\n 0b011: np.array([0, 1, 0]),\n 0b100: np.array([1, 1, 1]),\n 0b101: np.array([0, 1, 1]),\n 0b110: np.array([0, 0, 1]),\n 0b111: np.array([1, 0, 1]),\n}\n# Tail bits for the NRNSC encoder\nNRNSC_TAIL_BITS = np.array([0, 0, 0])\n\n\nclass Mr_fsk_modulator:\n def __init__(self,\n symbolRate : int,\n FSKModulationIndex : int,\n phyMRFSKSFD : int,\n modulation : str,\n phyFSKFECEnabled : bool,\n phyFSKFECScheme : int,\n macFCSType : int,\n phyFSKScramblePSDU : bool,\n phyFSKFECInterleavingRSC : bool,\n phyFSKPreambleLength : int = 4,\n verbose = False):\n \"\"\"\n Creates an instance of a MR-FSK modulator\n\n Parameters\n ----------\n symbolRate : int\n Number of symbols per second, if a float is supplied it will be converted to int\n FSKModulationIndex : float\n FSK Modulation index\n phyMRFSKSFD : int\n Selection of the SFD group (See table 131 of 802.15.4g)\n phyFSKPreambleLength : int\n Length of the preamble\n modulation : str\n Modulation type : \"2FSK\" or \"4FSK\"\n phyFSKFECEnabled : bool\n Enable FEC encoding (True) or not (False)\n phyFSKFECScheme : int\n Configures the FEC mode. 0 for NRNSC and 1 for RSC\n macFCSType : int\n Lengths of the FCS 0 -> 4, 1 -> 2\n FCS Type describing the length of transmitted FCS.\n phyFSKScramblePSDU : bool\n Enable (True) or disable (False) the whitening of the PSDU\n phyFSKFECInterleavingRSC : bool\n Enable (True) interleaving for RSC or disable (False)\n\n \"\"\"\n # Checks\n if isinstance(symbolRate, float):\n symbolRate = int(symbolRate)\n elif not isinstance(symbolRate, int):\n raise TypeError(\"symbolRate must be an integer\")\n if symbolRate <= 0:\n raise ValueError(\"symbolRate must be a positive integer value\")\n\n if not (isinstance(FSKModulationIndex, float) or isinstance(FSKModulationIndex, int)):\n raise TypeError(\"FSKModulationIndex must be a number\")\n if not (0.25 <= FSKModulationIndex <= 2.5):\n raise ValueError(f\"FSKModulationIndex ({FSKModulationIndex}) must be between 0.25 and 2.5\")\n\n if isinstance(phyMRFSKSFD, int):\n if phyMRFSKSFD not in [0, 1]:\n raise ValueError(\"phyMRFSKSFD should be 0 or 1\")\n else:\n raise TypeError(\"Invalid phyMRFSKSFD type. It should be int\")\n\n if isinstance(phyFSKPreambleLength, int):\n if not (4 <= phyFSKPreambleLength <= 1000):\n raise ValueError(\n \"phyFSKPreambleLength value is invalid. The range is 4-1000 (See Table 71)\")\n else:\n raise TypeError(\"phyFSKPreambleLength must be an integer\")\n\n if isinstance(modulation, str):\n if modulation not in [\"2FSK\", \"4FSK\"]:\n raise ValueError(\n \"Invalid modulation type. It should be \\\"2FSK\\\" or \\\"4FSK\\\"\")\n else:\n raise TypeError(\"Invalid modulation type. 
It should be str\")\n\n if not isinstance(phyFSKFECEnabled, bool):\n raise TypeError(\"phyFSKFECEnabled should be of type bool\")\n\n if not isinstance(phyFSKFECScheme, int):\n raise TypeError(\"phyFSKFECScheme should be of type int\")\n elif phyFSKFECScheme not in [0, 1]:\n raise ValueError(\"phyFSKFECScheme should be 0 or 1\")\n\n if not isinstance(macFCSType, int):\n raise TypeError(\"FCS_length should be of type int\")\n elif macFCSType not in [0, 1]:\n raise ValueError(\"FCS_length should be 0 or 1\")\n\n if isinstance(phyFSKScramblePSDU, int):\n phyFSKScramblePSDU = bool(phyFSKScramblePSDU)\n elif not isinstance(phyFSKScramblePSDU, bool):\n raise TypeError(\"phyFSKScramblePSDU should be of type bool\")\n\n if not isinstance(phyFSKFECInterleavingRSC, bool):\n raise TypeError(\"phyFSKFECInterleavingRSC should be of type bool\")\n\n self._symbol_rate = symbolRate\n self._FSKModulationIndex = FSKModulationIndex\n self._macFCSType = macFCSType\n self._phyFSKFECEnabled = phyFSKFECEnabled\n self._phyMRFSKSFD = phyMRFSKSFD\n self._phyFSKPreambleLength = phyFSKPreambleLength\n self._modulation = Modulation.FSK2 if modulation == \"2FSK\" else Modulation.FSK4\n self._phyFSKScramblePSDU = phyFSKScramblePSDU\n self._phyFSKFECInterleavingRSC = phyFSKFECInterleavingRSC\n self._phyFSKFECScheme = phyFSKFECScheme\n self._verbose = verbose\n\n def _bin(self, number, width=8, MSB_first=True):\n \"\"\"\n Converts a number to binary representation with LSB first\n\n Parameters\n ----------\n number : int\n\n Returns\n -------\n output : ndarray\n Array of bits\n \"\"\"\n\n return np.array([int(x) for x in np.binary_repr(number, width)[::(1 if MSB_first else -1)]])\n\n def _SHR(self):\n \"\"\"\n Returns SHR bitstream\n\n Returns\n -------\n signal : ndarray\n SHR bitstream\n \"\"\"\n\n signal = np.concatenate([\n np.tile(PREAMBLE_SEQUENCE[self._modulation],\n self._phyFSKPreambleLength),\n SFD[(self._modulation, self._phyMRFSKSFD, self._phyFSKFECEnabled)]\n ]).astype(int)\n return signal\n\n def _PHR_mode_switch(self, modeSwitchParameterEntry, new_mode_fec, PAGE, MOD, MD):\n \"\"\"\n Returns a PHR bitstream for mode_switch\n\n modeSwitchParameterEntry : int\n Mode switch operation (0-3)\n new_mode_fec : bool\n Signal that the packet following is encoded using FEC\n\n \"\"\"\n\n signal = np.zeros([PHR_LENGTH], dtype=int)\n # See figure 115\n signal[0] = 1 # MS\n signal[1:2+1] = self._bin(modeSwitchParameterEntry, 2)\n signal[3] = new_mode_fec # FEC\n signal[4] = PAGE # New Mode\n signal[5] = self._bin(MOD, 2) # New Mode\n signal[7:10+1] = self._bin(MD, 4) # New Mode\n PC = np.logical_xor.reduce(signal[:10+1])\n # BCH(15,11) code\n g = np.poly1d([1, 0, 0, 1, 1])\n\n print(Fore.RED + f\"Warning : BCH checksum isn't implemented (replaced with zeros)\" + Fore.RESET)\n B = np.array([0, 0, 0, 0])\n signal[11:14+1] = self._bin(B, 4)\n signal[15] = PC\n\n return signal\n\n def _PHR(self, message_length):\n \"\"\"\n Returns PHR bitstream\n\n Parameters\n ----------\n message_length : int\n Length of the PSDU (prior to FEC encoding) in octets\n\n\n Returns\n -------\n signal : ndarray\n SHR bitstream\n \"\"\"\n\n signal = np.zeros([PHR_LENGTH * 8], dtype=int)\n # See Figure 114\n signal[0] = 0 # MS\n signal[1:2+1] = 0 # Reserved\n signal[3] = self._macFCSType # FCS type\n signal[4] = 1 if self._phyFSKScramblePSDU else 0 # DW\n signal[5:] = self._bin(message_length, 11) # L\n\n return signal\n\n def _FEC(self, data, tail = True, pad = True):\n \"\"\"\n Apply FEC encoding to the data (PHR + PSDU) and appends tail and 
pad bits\n\n Parameters\n ----------\n data : ndarray\n Bitstream of the message to encode\n tail : bool\n Enable tail (True by default)\n pad : bool\n Enable pad (True by default)\n \"\"\"\n\n def M_iter_RSC(M, bi):\n # Extract bits\n M0, M1, M2 = (M >> 2) & 0b001, (M >> 1) & 0b001, M & 0b001\n # output values\n bi = int(bi)\n ui0 = bi\n ui1 = (bi ^ M0 ^ M1 ^ M2) ^ M1 ^ M2\n # Update M\n M0, M1, M2 = bi ^ M0 ^ M1 ^ M2, M0, M1\n\n return (M0 << 2) | (M1 << 1) | M2, ui0, ui1\n\n def M_iter_NRNSC(M, bi):\n # Extract bits\n M0, M1, M2 = (M >> 2) & 0b001, (M >> 1) & 0b001, M & 0b001\n bi = int(bi)\n ui0 = not (bi ^ M0 ^ M1 ^ M2)\n ui1 = not (bi ^ M1 ^ M2)\n\n M0, M1, M2 = bi ,M0, M1\n\n return (M0 << 2) | (M1 << 1) | M2, ui0, ui1\n\n\n # PAD_BITS are derived from figures 121 and 122. It looks like they could be set arbitrarily\n if (data.size//8) % 2 == 0:\n # is even\n # L_PAD = 13\n PAD_BITS = np.array([0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1])\n else:\n # L_PAD = 5\n PAD_BITS = np.array([0, 1, 0, 1, 1])\n\n M = 0b000\n\n encoded_PHR_PSDU = []\n for bi in data:\n # Split M into its bit values\n if self._phyFSKFECScheme:\n M, ui0, ui1 = M_iter_RSC(M, bi)\n else:\n M, ui0, ui1 = M_iter_NRNSC(M, bi)\n\n\n encoded_PHR_PSDU.append(ui1)\n encoded_PHR_PSDU.append(ui0)\n\n # Add tails bits and pad bits\n TAIL_BITS = RSC_TAIL_BITS[M] if self._phyFSKFECScheme else NRNSC_TAIL_BITS\n if tail:\n for bi in TAIL_BITS:\n if self._phyFSKFECScheme:\n M, ui0, ui1 = M_iter_RSC(M, bi)\n else:\n M, ui0, ui1 = M_iter_NRNSC(M, bi)\n\n encoded_PHR_PSDU.append(ui1)\n encoded_PHR_PSDU.append(ui0)\n if pad:\n for bi in PAD_BITS:\n if self._phyFSKFECScheme:\n M, ui0, ui1 = M_iter_RSC(M, bi)\n else:\n M, ui0, ui1 = M_iter_NRNSC(M, bi)\n\n encoded_PHR_PSDU.append(ui1)\n encoded_PHR_PSDU.append(ui0)\n\n return np.array(encoded_PHR_PSDU).astype(np.uint8)\n\n def _interleaver(self, data):\n \"\"\"\n Applies interleaver to the data\n\n Parameters\n ----------\n data : ndarray\n Input message (bitstream)\n\n Returns\n -------\n output : ndarray\n \"\"\"\n output = np.zeros_like(data)\n\n # NOTE : Each permutation is applied on a pair of bits\n\n BLOCK_SIZE = 16\n\n k = np.arange(BLOCK_SIZE, dtype=int)\n t = (15 - 4 * np.mod(k, 4) - np.floor(k / 4)).astype(int)\n\n for i, block in enumerate(data.reshape(-1, BLOCK_SIZE * 2)):\n new_data = np.zeros_like(block)\n for ki, ti in zip(k, t):\n new_data[ti*2:ti*2 + 2] = block[ki*2:ki*2 + 2]\n output[i*BLOCK_SIZE*2:(i+1)*BLOCK_SIZE*2] = new_data\n\n return output\n\n def _FSKModulator(self, message : np.ndarray, samplesPerSymbol : int):\n \"\"\"\n FSK modulation of the given message.\n\n # 2FSK modulation : \n the symbols are placed at +- df.\n 0 -> -fdev\n 1 -> +fdev\n \n # 4FSK modulation :\n the symbols are placed at +- df and +- df/3\n\n 01 -> -fdev\n 00 -> -fdev/3\n 10 -> +fdev/3\n 11 -> +fdev\n\n Parameters\n ----------\n message : np.ndarray\n Message bitstream\n samplesPerSymbol : int\n Number of IQ samples per symbol\n\n Returns\n -------\n output : ndarray\n Complex output signal\n f : float\n Sampling frequency\n \"\"\"\n # Frequency deviation (from the center)\n deltaF = self._symbol_rate * self._FSKModulationIndex / 2\n if self._modulation == Modulation.FSK2:\n fdev = deltaF\n else:\n fdev = 3 * deltaF\n\n mod = {\n # 2FSK\n '0' : -fdev,\n '1' : +fdev,\n # 4FSK\n '01' : -fdev,\n '00' : -fdev/3,\n '10' : +fdev/3,\n '11' : +fdev\n }\n\n step = 1 if self._modulation == Modulation.FSK2 else 2\n # Create a frequency deviation signal\n freqs = []\n for val in 
message.reshape(-1,step):\n key = ''.join([str(x) for x in val])\n freqs.append(mod[key])\n\n # Generate I and Q from the frequency deviation\n \n # Symbol period\n Ts = 1/self._symbol_rate\n\n f = np.repeat(freqs, samplesPerSymbol)\n dt = (Ts / samplesPerSymbol)\n t = np.arange(0, dt * f.size, dt)\n\n IQ = np.exp(2*np.pi*f*1j*t)\n\n return IQ, 1/dt\n\n\n\n def bitsToIQ(self, bits):\n \"\"\"\n Encodes the given binary message with MR-FSK modulator\n\n Parameters\n ----------\n bits : ndarray or list\n Message to encode (PSDU)\n\n Returns\n -------\n signal : ndarray\n output bitstream\n f : float\n signal frequency\n \"\"\"\n bits = check_binary_array(bits)\n\n self._PHR_PSDU = np.concatenate(\n [self._PHR(bits.size // 8), bits])\n\n # Symbol_length is the number of bits coded for a single symbol. If FEC is disabled, there's one bit per symbol. If FEC is enabled, there's two bits per symbol\n\n if self._phyFSKFECEnabled:\n symbol_length = 2\n self._PHR_PSDU_encoded = self._FEC(self._PHR_PSDU)\n\n if self._phyFSKFECInterleavingRSC:\n self._PHR_PSDU_interleaved = self._interleaver(\n self._PHR_PSDU_encoded)\n self._PHR_PSDU = self._PHR_PSDU_interleaved\n else:\n # Data is unchanged (encoding only)\n self._PHR_PSDU = self._PHR_PSDU_encoded\n else:\n # Do not change anything\n symbol_length = 1\n\n # Apply data whitening (or not)\n if self._phyFSKScramblePSDU:\n PSDU_start = PHR_LENGTH * symbol_length * 8\n\n self._PHR_PSDU_scrambled = self._PHR_PSDU.copy()\n\n self._PHR_PSDU_scrambled[PSDU_start:] = operations.scrambler(\n self._PHR_PSDU_scrambled[PSDU_start:])\n self._PHR_PSDU = self._PHR_PSDU_scrambled\n # Generate output signal\n \n self._binarySignal = np.concatenate([\n self._SHR(),\n self._PHR_PSDU\n ])\n\n # TODO : change this\n samplesPerSymbol = 20\n\n return self._FSKModulator(self._binarySignal, samplesPerSymbol)\n\n def bytesToIQ(self, bytes):\n \"\"\"\n Encodes the given message (list of bytes) with MR-FSK modulator\n\n Parameters\n ----------\n bytes : ndarray or list or bytes\n Message to encode (PSDU) as a list of bytes\n\n Returns\n -------\n signal : ndarray\n output bitstream\n f : float\n signal frequency\n \"\"\"\n message_bin = to_binary_array(bytes)\n return self.bitsToIQ(message_bin)\n\n def modeSwitchToIQ(self, modeSwitchParameterEntry, new_mode_fec):\n \"\"\"\n\n new_mode_fec : bool\n Signal that the packet following\n\n modeSwitchParameterEntry : int\n Mode switch operation (0-3)\n \"\"\"\n\n if isinstance(modeSwitchParameterEntry, int):\n if not (0 <= modeSwitchParameterEntry <= 3):\n raise ValueError(\n \"Invalid modeSwitchParameterEntry value. It should be between 0 and 3\")\n else:\n raise TypeError(\n \"Invalid modeSwitchParameterEntry type. 
It should be int\")\n\n def _print_verbose(self, message: str):\n \"\"\"\n Prints additionnal information if the verbose flag is True\n \"\"\"\n if(self._verbose):\n print(message)\n","repo_name":"SebastienDeriaz/sun_phy","sub_path":"sun_phy/mr_fsk/mr_fsk_modulator.py","file_name":"mr_fsk_modulator.py","file_ext":"py","file_size_in_byte":17002,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"1718887827","text":"import random\nimport pennylane as qml\nimport json\nimport sys\nfrom pennylane import numpy as np\nfrom time import localtime, strftime\nfrom plotting import plot_curves\nfrom data import load_mnist, mnist_apn_generator\nfrom data import load_mnist_ae\nfrom data import load_breast_cancer_lju, bc_apn_generator\nfrom data import load_moons_dataset, moons_apn_generator\nfrom evaluation import evaluate\n\n\nwith open('hyperparameters.json') as json_file:\n hp = json.load(json_file)\nprint(hp)\n\nif len(sys.argv) == 2:\n hp[\"output_qubits\"] = int(sys.argv[1])\n\nstarting_time = strftime(\"%Y-%m-%d_%H-%M-%S\", localtime())\n\nnp.random.seed(hp[\"seed\"])\nrandom.seed(hp[\"seed\"])\n\n\ndef circuit(params, data):\n qml.templates.embeddings.AngleEmbedding(\n features=data, wires=range(hp[\"data_qubits\"]), rotation=\"X\"\n )\n\n for layer in range(hp[\"layers\"]):\n for wire in range(hp[\"qubits\"]):\n qml.RX(params[layer][wire][0], wires=wire)\n qml.RY(params[layer][wire][1], wires=wire)\n for wire in range(0, hp[\"qubits\"] - 1, 2):\n qml.CZ(wires=[wire, wire + 1])\n for wire in range(1, hp[\"qubits\"] - 1, 2):\n qml.CZ(wires=[wire, wire + 1])\n return [qml.expval(qml.PauliZ(i)) for i in range(hp[\"output_qubits\"])]\n # return [qml.expval(qml.PauliZ(2*x) @ qml.PauliZ((2*x)+1))\n # for x in range(hp[\"output_qubits\"])]\n\n\ndef triplet_loss(params, qNode, anchor, positive, negative, alpha):\n a_value = qNode(params, anchor)\n p_value = qNode(params, positive)\n n_value = qNode(params, negative)\n\n dist_a_p = np.linalg.norm(a_value - p_value)**2\n dist_a_n = np.linalg.norm(a_value - n_value)**2\n\n return max(dist_a_p - dist_a_n + alpha, 0.0)\n\n\ndef train():\n assert(hp[\"dataset\"] in [\"mnist\", \"mnist_ae\", \"bc\", \"moons\"])\n dev = qml.device('default.qubit', wires=hp[\"qubits\"], shots=hp[\"shots\"])\n \n qNode = qml.QNode(func=circuit, device=dev)\n\n stepsize = hp[\"start_stepsize\"]\n optimizer = qml.AdamOptimizer(stepsize)\n\n def cost_fn(params):\n return triplet_loss(params, qNode, anchor,\n positive, negative, hp[\"alpha\"])\n\n params = np.random.uniform(low=-np.pi, high=np.pi,\n size=(hp[\"layers\"], hp[\"qubits\"], 2)\n )\n\n if hp[\"dataset\"] == \"mnist\":\n train_x, train_y, test_x, test_y = load_mnist(seed=hp[\"seed\"],\n train_size=hp[\"train_size\"],\n test_size=hp[\"test_size\"],\n classes=hp[\"classes\"],\n wires=hp[\"data_qubits\"]\n )\n\n apn_generator = mnist_apn_generator(train_x,\n train_y,\n n_cls=len(hp[\"classes\"])\n )\n elif hp[\"dataset\"] == \"mnist_ae\":\n train_x, train_y, test_x, test_y = load_mnist_ae(train_size=hp[\"train_size\"],\n test_size=hp[\"test_size\"],\n classes=hp[\"classes\"],\n wires=hp[\"data_qubits\"]\n )\n\n apn_generator = mnist_apn_generator(train_x,\n train_y,\n n_cls=len(hp[\"classes\"])\n )\n elif hp[\"dataset\"] == \"bc\":\n train_x, train_y, test_x, test_y = load_breast_cancer_lju(hp[\"train_size\"],\n hp[\"test_size\"]\n )\n apn_generator = bc_apn_generator(train_x,\n train_y\n )\n elif hp[\"dataset\"] == \"moons\":\n train_x, train_y, test_x, test_y = 
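(None, None, None, None)  # placeholder, reassigned by the statement below\n        # (added note, a sketch) each dataset branch yields the same 4-tuple split;\n        # the moons branch additionally overwrites hp[\"classes\"] after loading.\n        train_x, train_y, test_x, test_y = 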
load_moons_dataset(hp[\"train_size\"],\n hp[\"test_size\"]\n )\n apn_generator = moons_apn_generator(train_x,\n train_y\n )\n hp[\"classes\"] = [\"Moon 1\", \"Moon 2\"]\n\n accuracys = []\n dbis = []\n losses = []\n gradients = []\n\n for step in range(hp[\"steps\"] + 1):\n anchor, positive, negative = next(apn_generator)\n\n params, c = optimizer.step_and_cost(cost_fn, params)\n\n print(f\"step {step:{len(str(hp['steps']))}}| cost {c:8.5f}\")\n\n losses.append(c)\n\n if step % hp[\"grads_every\"] == 0:\n g, _ = optimizer.compute_grad(cost_fn, (params,), {}, None)\n gradients.append(np.average(np.abs(g)))\n # print(\"Gradients\", g[0][0, 0, 0])\n # return g[0][0, 0, 0]\n\n if step % hp[\"test_every\"] == 0:\n accuracy, dbi = evaluate(hp[\"dataset\"], train_x, train_y,\n test_x, test_y,\n qNode, params, step,\n hp[\"classes\"], hp[\"output_qubits\"]\n )\n accuracys.append((step, accuracy))\n dbis.append((step, dbi))\n print(\"Accuracys:\\n\", accuracys)\n\n # if (step+1) % hp[\"update_sz_every\"] == 0:\n # stepsize *= hp[\"sz_factor\"]\n # optimizer.stepsize = stepsize\n # print(\"Updated stepsize to\", stepsize)\n\n if accuracys:\n print(\"Accuracys:\\n\", accuracys)\n top_acc = max(np.array(accuracys)[:, 1])\n print(\"Maximum: \", top_acc)\n hp[\"top_acc\"] = float(top_acc)\n\n if dbis:\n print(\"DBIs:\\n\", dbis)\n top_dbi = min(np.array(dbis)[:, 1])\n print(\"Minimum:\", top_dbi)\n hp[\"top_dbi\"] = float(top_dbi)\n\n if gradients:\n print(\"Gradients Avg: \", np.average(gradients))\n\n name = f\"{starting_time}_{hp['output_qubits']}\"\n with open(f\"./trainings/{name}.json\", \"w\") as json_file:\n json.dump(hp, json_file)\n np.savez(f\"./trainings/{name}.npz\",\n accuracys=accuracys,\n dbis=dbis,\n losses=losses,\n gradients=gradients,\n params=params\n )\n\n # plot_curves(np.array(accuracys),\n # np.array(dbis),\n # np.array(losses),\n # f\"Qubits: {hp['qubits']}, \" +\n # f\"Layers: {hp['layers']}, \" +\n # f\"Classes: {hp['classes']}, \" +\n # f\"Output_dim: {hp['output_qubits']}\"\n # )\n \n\nif __name__ == \"__main__\":\n # n_layers = [5, 25, 50, 75, 100]\n # n_qubits = [4, 6, 8, 10, 12]\n # seeds = list(range(100))\n\n # gradients = np.zeros((len(n_layers), len(n_qubits), len(seeds)))\n\n # for il, l in enumerate(n_layers):\n # for iq, q in enumerate(n_qubits):\n # for js, s in enumerate(seeds):\n # hp[\"layers\"] = l\n # hp[\"qubits\"] = q\n # hp[\"seeds\"] = s\n # print(l, q, s)\n # grad = train()\n # gradients[il, iq, js] = grad\n\n # np.savez(f\"./trainings/{starting_time}_grads.npz\", grads=gradients,\n # n_layers=np.array(n_layers),\n # n_qubits=np.array(n_qubits),\n # seeds=np.array(seeds))\n # print(gradients)\n\n train()\n","repo_name":"cirKITers/quantum-triplet-loss","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"16653818936","text":"#vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom matplotlib import rc\nfrom os.path import expanduser\nfrom pandas import DataFrame\nfrom collections import defaultdict\n\nBASE_PATH = expanduser('./fig')\n\ndef plot_detection_timeliness_hist_1hr(X):\n plt.clf()\n t = np.arange(min(X), max(X)+1, 60) # 6 hours period\n plt.hist(X)\n plt.ylabel(r'frequency')\n plt.xlabel(r'time (minute)')\n plt.xticks(t)\n # plt.title(r\"\\TeX\\ is Number \"\n # r\"$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n # fontsize=16, 
color='gray')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_timeliness_hist_1hr.pdf'))\n\ndef plot_detection_timeliness_hist(X):\n X /= 3600\n plt.clf()\n t = np.arange(min(X), max(X)+1, 12) # 6 hours period\n plt.hist(X)\n plt.ylabel(r'frequency')\n plt.xlabel(r'time (hr)')\n plt.xticks(t)\n # plt.title(r\"\\TeX\\ is Number \"\n # r\"$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n # fontsize=16, color='gray')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_timeliness_hist.pdf'))\n\ndef plot_detection_timeliness(X):\n X /= 3600\n plt.clf()\n t = np.arange(min(X), max(X)+1, 12) # 6 hours period\n X_ = np.sort(X)\n X_ /= X_.sum()\n CY = np.cumsum(X_)\n plt.plot(np.sort(X),CY,'r')\n plt.ylabel(r'empirical cdf')\n plt.xlabel(r'time (hr)')\n plt.xticks(t)\n # plt.title(r\"\\TeX\\ is Number \"\n # r\"$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n # fontsize=16, color='gray')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_timeliness.pdf'))\n\ndef plot_detection_accuracy(X):\n plt.clf()\n df2 = DataFrame(X, columns=['detected', 'not detected'])\n ax = df2.plot(kind='bar', stacked=True, color='k');\n ax.set_ylabel('# compromised user')\n ax.set_xlabel('incident id')\n plt.tight_layout()\n plt.yticks([1,2,3,4])\n for container in ax.containers:\n if container.get_label() == 'detected':\n plt.setp(container, color='0.75')\n else:\n plt.setp(container, color='0')\n # ax.legend()\n plt.legend(loc='upper left')\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_accuracy.pdf'))\n \n\n# def plot_sequencing_accuracy(X, idx, name):\n# \"\"\"\n# length of the array must equal length of the idx\n# \"\"\"\n# df2 = DataFrame(X, index=idx,columns=['correct', 'incorrect'])\n# df2.plot(kind='bar', stacked=True)\n\n# fig = plt.gcf()\n# fig.savefig('{}/{}'.format(BASE_PATH,\n# 'graph_sequencing_accuracy_%s.pdf' % name))\n\n \ndef plot_sequencing_accuracy(conf_arr, idx, name, fontsize=12):\n norm_conf = []\n for i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i,0)\n for j in i:\n tmp_arr.append(float(j)/float(a))\n norm_conf.append(tmp_arr)\n plt.clf()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.xaxis.tick_top()\n [ax.grid(False) for ax in fig.axes]\n res = ax.imshow(np.array(norm_conf), origin='upper', cmap=plt.cm.jet_r, interpolation='nearest')\n for i, cas in enumerate(conf_arr):\n for j, c in enumerate(cas):\n plt.text(i, j, c, horizontalalignment='center', verticalalignment='center',fontsize=fontsize, color='white')\n cb = fig.colorbar(res)\n width = len(conf_arr)\n height = len(conf_arr[0])\n plt.xticks(range(width), idx[:width])\n plt.yticks(range(height), idx[:height])\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_sequencing_accuracy_%s.pdf' % name))\n\ndef plot_detection_accuracy_helper(result):\n # detection accuracy\n d = defaultdict()\n for row in result.iterrows():\n r = row[1]\n if (d.has_key(r.incident) == False):\n d[r.incident] = defaultdict()\n \n if (d[r.incident].has_key('hit') == False):\n d[r.incident]['hit'] = 0\n \n if (d[r.incident].has_key('miss') == False):\n d[r.incident]['miss'] = 0\n\n if r.accuracy == 1:\n d[r.incident]['hit'] += 1\n else:\n d[r.incident]['miss'] += 1\n \n detection_accuracy = []\n for incident in d:\n detection_accuracy.append([d[incident]['hit'], d[incident]['miss']])\n \n plot_detection_accuracy(np.array(detection_accuracy, dtype=int))\n \ndef 
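_save_fig(name):\n    # (added illustrative helper, not in the original module; 'name' is a\n    # hypothetical parameter) the figure-saving idiom repeated across this file.\n    fig = plt.gcf()\n    fig.savefig('{}/{}'.format(BASE_PATH, name))\n\ndef 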
plot_detection_timeliness_helper(result):\n # detection accuracy\n plot_detection_timeliness(np.array(result.detection_timeliness, dtype=float))\n # print zip(result.num_event, result.labeling_time)\n # print result.detection_timeliness, result.preemption_timeliness, result.attack_duration\n \ndef plot_preemption_cdf(df):\n df_filtered = df[df['accuracy'] == 1]\n df = df_filtered\n # raise SystemExit\n \n plt.clf()\n \n X = np.array([x/3600 if x >=0 and x/3600 < 48 else 48 for x in df.preemption_timeliness], dtype=float)\n try:\n plt.hist(X, cumulative=True, label='preemption', color='0')\n except ValueError:\n pass\n \n plt.ylim(14,24)\n plt.xlabel('preemption timeliness (hr)')\n plt.ylabel('cumulative count')\n plt.legend(loc='upper left')\n fig = plt.gcf()\n [ax.grid(False) for ax in fig.axes]\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_preemption_cdf.pdf'))\n \ndef plot_detection_cdf(df):\n df_filtered = df[df['accuracy'] == 1]\n df = df_filtered\n # raise SystemExit\n \n plt.clf()\n \n X = np.array([x/3600 if x >=0 and x/3600 < 48 else 48 for x in df.detection_timeliness], dtype=float)\n try:\n ax = plt.hist(X, cumulative=True, label='detection', color='0')\n except ValueError:\n pass\n \n plt.ylim(14,24)\n plt.xlabel('detection timeliness (hr)')\n plt.ylabel('cumulative count')\n plt.legend(loc='upper left')\n fig = plt.gcf()\n [ax.grid(False) for ax in fig.axes]\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_cdf.pdf'))\n \ndef plot_detection_and_preemption_timeline(df):\n plt.clf()\n timeline = []\n for row in df.iterrows():\n r = row[1]\n if r.detection_timeliness!= -1:\n sum = r.detection_timeliness + r.preemption_timeliness\n if (sum != 0):\n ratio = r.detection_timeliness/sum\n timeline.append(ratio)\n else:\n timeline.append(1)\n\n fig, ax = plt.subplots()\n\n count = 0\n for user in timeline:\n if count == 0:\n plt.bar(left=0, width=user, height=0.8, bottom=count, color='0.75', label='not_malicious')\n plt.bar(left=user, width=1.02-user, height=0.8, bottom=count, color='0', label='malicious')\n else:\n plt.bar(left=0, width=user, height=0.8, bottom=count, color='0.75')\n plt.bar(left=user, width=1.02-user, height=0.8, bottom=count, color='0')\n count = count + 1\n ax.set_yticks(np.arange(0, len(timeline), 5), minor=False)\n ax.set_xticks(np.arange(0, 2), minor=False)\n ax.grid(False)\n \n # for container in ax.containers:\n # if container.get_label() == 'malicious':\n # plt.setp(container, hatch='xxx')\n # else:\n # plt.setp(container, hatch='ooo')\n # ax.legend()\n \n plt.axis([0,1.03,-1,len(timeline)])\n plt.xlabel('time (normalized)')\n plt.ylabel('user id')\n plt.legend(loc='upper left')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_and_preemption_timeline.pdf'))\n \ndef plot_labeling_time(df):\n plt.clf()\n plt.scatter(df.num_event, df.labeling_time)\n pars= np.polyfit(df.num_event, df.labeling_time, 1)\n fitted_y = np.polyval(pars,df.num_event)\n plt.plot(df.num_event, fitted_y, 'r--')\n plt.xlabel('number of events')\n plt.ylabel('labeling time (s)')\n\n # t = np.array(df.num_event)\n # xn = np.array(df.labeling_time)\n # #Linear regressison -polyfit - polyfit can be used other orders polys\n # (ar,br)=polyfit(t,xn,1)\n # xr=polyval([ar,br],t)\n # #compute the mean square error\n\n # t = (xr-xn)**2\n # n = len(t)\n # err=sqrt(t.sum()/n)\n # print len(t), err\n\n\n # print df.accuracy\n \n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_labeling_time.pdf'))\n \ndef 
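_fit_line(x, y):\n    # (added sketch, hypothetical helper) the degree-1 least-squares idiom used by\n    # plot_labeling_time above, in isolation: fit y ~ a*x + b, return fitted values.\n    pars = np.polyfit(x, y, 1)\n    return np.polyval(pars, x)\n\ndef 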
plot_result(result):\n \"\"\"\n result: list of incident_results\n [\n (user, accuraty (0 - not detected or 1 - detected), detection timeliness in second\n ]\n \"\"\"\n plot_detection_accuracy_helper(result)\n plot_detection_cdf(result)\n plot_preemption_cdf(result)\n plot_detection_and_preemption_timeline(result)\n plot_labeling_time(result)\n\n # # detection timeliness\n # detection_timeliness = []\n # for incident in result:\n # for incident_result in incident:\n # timeliness = incident_result[2]\n # if (timeliness > 0):\n # detection_timeliness.append(timeliness) # timeliness\n\n # plot_detection_timeliness(np.array(detection_timeliness, dtype=float))\n # plot_detection_timeliness_hist(np.array(detection_timeliness, dtype=float))\n # plot_detection_timeliness_hist_1hr(np.array([ x < 3600 for x in detection_timeliness], dtype=float))\n\ndef plot_variation(df):\n plt.clf()\n plt.hist(df.accuracy_list[0], color='k')\n plt.xlabel('detect at event number')\n plt.ylabel('count')\n plt.xticks([3, 4, 5, 6]) # TODO FIX THIS\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_variation.pdf'))\n","repo_name":"ncsa/AttackTagger","sub_path":"src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":9865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"41051139713","text":"import math\nimport torch\nfrom overrides import overrides\nimport torch.distributed as dist\nimport vae_lm.nn.utils as util\nfrom torch_nlp_utils.metrics import Metric\n\n\nclass Average(Metric):\n \"\"\"Simple metric to average results over passed tensor values.\"\"\"\n\n def __init__(self) -> None:\n self._total_value = 0.0\n self._count = 0\n\n @overrides\n def __call__(self, value: torch.Tensor) -> None:\n _total_value = list(util.unwrap_to_tensors(value))[0]\n _count = 1\n if util.dist_available():\n device = util.int_to_device(\n -1 if dist.get_backend() != \"nccl\" else torch.cuda.current_device()\n )\n count = torch.tensor(_count, device=device)\n total_value = torch.tensor(_total_value, device=device)\n # Reduce from all processes\n dist.all_reduce(count, op=dist.ReduceOp.SUM)\n dist.all_reduce(total_value, op=dist.ReduceOp.SUM)\n _count = count.item()\n _total_value = total_value.item()\n self._count += _count\n self._total_value += _total_value\n\n @overrides\n def get_metric(self, reset: bool = False):\n \"\"\"Average of accumulated values.\"\"\"\n average_value = self._total_value / self._count if self._count > 0 else 0.0\n if reset:\n self.reset()\n return float(average_value)\n\n @overrides\n def reset(self):\n self._total_value = 0.0\n self._count = 0\n\n\nclass Perplexity(Average):\n \"\"\"\n Perplexity is a common metric used for evaluating how well a language model\n predicts a sample.\n\n Notes\n -----\n Assumes negative log likelihood loss of each batch (base e). 
Provides the\n average perplexity of the batches.\n \"\"\"\n\n @overrides\n def get_metric(self, reset: bool = False) -> float:\n \"\"\"The accumulated perplexity.\"\"\"\n average_loss = super().get_metric(reset)\n if average_loss == 0:\n return 0.0\n # Exponentiate the loss to compute perplexity\n return math.exp(average_loss)\n","repo_name":"Nemexur/nonauto-lm","sub_path":"vae_lm/training/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"41389947972","text":"from bd import obtener_conexion\r\n\r\ndef insertar_escuela(nombre,descripcion, estado, nombre1):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"INSERT INTO ESCUELA(idEscuela,nombre,descripcion, estado, idFacultad) VALUES (%s,%s,%s, %s, %s)\",\r\n (obtener_ultimoid(),nombre,descripcion, estado, obtener_idfacultad(nombre1)))\r\n conexion.commit()\r\n conexion.close()\r\n\r\ndef obtener_idfacultad(nombre):\r\n conexion = obtener_conexion()\r\n id=None\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idFacultad from FACULTAD WHERE nombre = %s\",\r\n (nombre))\r\n id = cursor.fetchone()\r\n conexion.close()\r\n return id\r\n\r\n\r\ndef obtener_ultimoid():\r\n conexion = obtener_conexion()\r\n id=None\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT COALESCE((MAX(idEscuela)),0)+1 as idEscuela from ESCUELA\")\r\n id = cursor.fetchone()\r\n conexion.close()\r\n return id\r\n\r\n\r\ndef listarFacultades():\r\n conexion = obtener_conexion()\r\n facultad=[]\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT nombre from FACULTAD\")\r\n facultad = cursor.fetchall()\r\n conexion.close()\r\n return facultad\r\n\r\n\r\ndef obtener_escuela():\r\n conexion = obtener_conexion()\r\n escuela = []\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,e.nombre,e.descripcion,CASE e.estado WHEN 'V' THEN 'Vigente' ELSE 'No vigente' END AS estado, f.nombre FROM ESCUELA e INNER JOIN FACULTAD f on e.idFacultad=f.idFacultad\")\r\n escuela = cursor.fetchall()\r\n print(escuela)\r\n conexion.close()\r\n return escuela\r\n\r\ndef obtener_escuela_index(limit,offset):\r\n conexion=obtener_conexion()\r\n escuela=[]\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,nombre,descripcion, CASE estado WHEN 'V' THEN 'Vigente' ELSE 'No vigente' END AS estado, idFacultad FROM ESCUELA limit {} offset {}\".format(limit, offset))\r\n escuela = cursor.fetchall()\r\n print(escuela)\r\n conexion.close()\r\n return escuela\r\n\r\ndef actualizar_escuela(nombre,descripcion, estado, nombre1,id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"UPDATE ESCUELA SET nombre= %s,descripcion= %s, estado= %s, idFacultad= %s WHERE idEscuela = %s\",\r\n (nombre,descripcion, estado, obtener_idfacultad(nombre1),id))\r\n conexion.commit()\r\n conexion.close()\r\n\r\n\r\ndef dar_baja(id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"UPDATE ESCUELA SET estado='N' WHERE idEscuela = %s\",\r\n (id))\r\n conexion.commit()\r\n conexion.close()\r\ndef dar_alta(id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"UPDATE ESCUELA SET estado='V' WHERE idEscuela = %s\",\r\n (id))\r\n conexion.commit()\r\n conexion.close()\r\ndef buscar_escuela(nombre):\r\n conexion = obtener_conexion()\r\n escuela = 
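[]  # placeholder; rebound to the real query result just below\r\n    # (added note, a sketch) '%'||%s||'%' builds the LIKE pattern with SQL string\r\n    # concatenation, which PostgreSQL/SQLite accept; a driver-portable alternative\r\n    # is cursor.execute(\"... LIKE %s\", ('%' + nombre + '%',)).\r\n    escuela = 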
[]\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,nombre,descripcion, estado, idFacultad FROM ESCUELA WHERE nombre LIKE ('%'||%s||'%')\", (nombre,))\r\n escuela = cursor.fetchall()\r\n conexion.close()\r\n return escuela\r\n\r\ndef buscar_escuela_id(id):\r\n conexion = obtener_conexion()\r\n escuela = []\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,e.nombre,e.descripcion, e.estado, f.nombre FROM ESCUELA e INNER JOIN FACULTAD f on e.idFacultad=f.idFacultad WHERE idEscuela = %s\", (id,))\r\n escuela = cursor.fetchone()\r\n conexion.close()\r\n return escuela\r\n\r\ndef eliminar_escuela(id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"DELETE FROM ESCUELA WHERE idEscuela = %s\", (id))\r\n conexion.commit()\r\n conexion.close()","repo_name":"BeafLee/PractiSoft","sub_path":"controladores/controlador_escuela.py","file_name":"controlador_escuela.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11349986226","text":"\n\n# Writte by Trent Balius, FNLCR, April 6, 2021\n\nimport sys \n\ndef main():\n file1 = sys.argv[1]\n file2 = sys.argv[2]\n \n print(\"input = %s\"%(file1))\n print(\"output = %s\"%(file2))\n \n fh1 = open(file1,'r')\n \n dic_id_count = {}\n dic_id_line = {}\n \n \n \n for line in fh1:\n sline = line.split()\n sid = sline[1]\n if sid in dic_id_count:\n dic_id_count[sid]=dic_id_count[sid]+1\n else: \n dic_id_count[sid]=1\n dic_id_line[sid] = line.strip()\n \n \n fh1.close()\n \n fh2 = open(file2,'w')\n for key in sorted(dic_id_line.keys()):\n fh2.write('%s\\n'%(dic_id_line[key]))\n fh2.close()\n \n # print the ids that have more than on entree in the file. 
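\n    # (added note, a sketch) the same tally could be built with collections.Counter:\n    #   from collections import Counter\n    #   counts = Counter(line.split()[1] for line in open(file1))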
\n \n for key in sorted(dic_id_count.keys()):\n if dic_id_count[key] > 1: \n print(key,dic_id_count[key])\n \nmain()\n","repo_name":"tbalius/teb_scripts_programs","sub_path":"zzz.scripts/make_smi_uniq.py","file_name":"make_smi_uniq.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"16469274227","text":"FILE_NAME = \"input.txt\"\nGAMMA = \"GAMMA\"\nEPSILON = \"EPSILON\"\n\ninput = open(FILE_NAME, \"r\")\nVECTOR_SIZE = len(input.readline()) - 1\n\ndef read_input():\n open(FILE_NAME, \"r\")\n return input.readlines()\n\nvalues = read_input()\n\ncounted_values = [0] * VECTOR_SIZE\nXOR_MASK = '1' * VECTOR_SIZE\n\ndef count_bits(value):\n for i in range(0, VECTOR_SIZE):\n if value[i] == '1':\n counted_values[i] = counted_values[i] + 1 \n\ndef calculate_gamma():\n gamma = \"\"\n for i in range (0, VECTOR_SIZE):\n if counted_values[i] >= len(values)/2:\n gamma = gamma + '1'\n else:\n gamma = gamma + '0'\n return gamma\n\ndef calculate_epsilon(gamma):\n return gamma ^ int(XOR_MASK,2)\n\ndef reset_bit_counter():\n for i in range(0, VECTOR_SIZE):\n counted_values[i] = 0\n\ndef count_all_vectors():\n for value in values:\n count_bits(value)\n\ndef eliminate_by_bit_criteria(i, criteria):\n indexes_to_delete = []\n for j in range(0,len(values)):\n if criteria[i] != (values[j])[i]:\n indexes_to_delete.append(j)\n\n for k in range(len(indexes_to_delete)-1, -1, -1):\n if(len(values) == 1):\n break\n del values[indexes_to_delete[k]]\n\ndef recount_and_eliminate(calculate_by):\n while len(values) > 1:\n for i in range(0, VECTOR_SIZE):\n reset_bit_counter()\n count_all_vectors()\n gamma = calculate_gamma()\n if(calculate_by == \"GAMMA\"):\n criteria = gamma\n else:\n epsilon = calculate_epsilon(int(gamma,2))\n criteria = str.zfill(str(bin(epsilon)).split('b')[1], VECTOR_SIZE)\n eliminate_by_bit_criteria(i, criteria)\n\n\ncount_all_vectors()\ngamma = int(calculate_gamma(),2)\nprint(\"gamma \" + str(gamma))\nepsilon = calculate_epsilon(gamma)\nprint(\"epsilon \" + str(epsilon))\n# part one solution\nprint(gamma * epsilon)\n\nvalues = (open(FILE_NAME, \"r\")).readlines()\n\nrecount_and_eliminate(GAMMA)\noxygen = int(values[0],2)\nprint(\"oxy \" + str(oxygen))\n\nvalues = (open(FILE_NAME, \"r\")).readlines()\n\nrecount_and_eliminate(EPSILON)\nco2 = int(values[0], 2)\nprint(\"co2 \" + str(co2))\n\n# part two solution\nprint(oxygen*co2)","repo_name":"antebm/advent-of-code-2021","sub_path":"day03/binary_diagnostic.py","file_name":"binary_diagnostic.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5467534638","text":"from PIL import Image, ImageDraw, ImageFont\r\nimport os\r\n\r\n# Open the PNG image file\r\nimage = Image.open('image/Participation.png')\r\n\r\n# Create a drawing object\r\ndraw = ImageDraw.Draw(image)\r\n\r\n# Define the font and size for the text\r\nfont = ImageFont.truetype('GreatVibes-Regular.ttf', 400)\r\n# Define the text to write on the image\r\ntext = input('Enter the name: ')\r\n\r\n# Calculate the size of the text\r\ntext_width, text_height = draw.textbbox((0, 0), text, font=font)[2:]\r\n\r\n# Calculate the position of the text\r\nx = (image.width - text_width) / 2\r\ny = 2300\r\n\r\n# Define the color of the text\r\ntext_color = (26, 53, 95)\r\n\r\n# Write the text on the image\r\ndraw.text((x, y), text, font=font, fill=text_color)\r\n\r\n# Save the modified image as a new PNG file 
with the user-provided name\r\noutput_file = f'image/{text}.png'\r\nimage.save(output_file)\r\n\r\n# Display the paths of the modified and original files\r\nprint(f'Image with text: {output_file}')\r\n\r\n","repo_name":"UsamaAslam711/Certificate-Generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"46611617193","text":"#@profile\ndef decode_event(data,i=0):\n assert list(data[i:i+4])==[0xAA]*4\n iev=sum(b<<(j*8) for j,b in enumerate(data[i+4:i+ 8]))\n tev=sum(b<<(j*8) for j,b in enumerate(data[i+8:i+16]))\n i+=4*4\n hits=[]\n if data[i]&0xF0==0xE0: # chip empty frame\n assert data[i+2]==0xFF\n assert data[i+3]==0xFF\n i+=4\n elif data[i]&0xF0==0xA0: # chip header\n i+=2\n n=len(data)\n reg=None\n while i>9&0x3FE|(d^d>>1)&0x1,d>>1&0x1FF))\n data2=data[i+2]\n d+=1\n while data2:\n if data2&1:\n hits.append((d>>9&0x3FE|(d^d>>1)&0x1,d>>1&0x1FF))\n data2>>=1\n d+=1\n i+=3\n elif data0&0xC0==0x40: # data short\n d=reg<<14|(data0&0x3F)<<8|data[i+1]\n hits.append((d>>9&0x3FE|(d^d>>1)&0x1,d>>1&0x1FF))\n i+=2\n elif data0&0xE0==0xC0: # region header\n reg=data0&0x1F\n i+=1\n elif data0&0xF0==0xB0: # chip trailer\n i+=1\n i=(i+3)//4*4\n break\n elif data0==0xFF:\n i+=1\n else:\n raise ValueError('i=%d'%i)\n else:\n raise ValueError('i=%d'%i)\n assert list(data[i:i+4])==[0xBB]*4\n i+=4\n return hits,iev,tev,i\ndef main(d):\n nev=0\n i=0\n pbar=tqdm(total=len(d))\n while i\n#\n#\tExample:\n#\t\tMergeDataModule.py ..\\..\\Cortex-Command-Community-Project-Release Psyclones.rte\n#\n#\tWarning:\n#\t\tThis priimitive tool simply dumps all the contents of the ini files into single file to speed-up load times. \n#\t\tIt respects IncludeFile statetemnts and removes them from the output file.\n#\t\tIt's intended to use only for mod creators to speed up game loading time when they have to reload the game very often to test stuff.\n#\n\nimport sys\nimport os\n\ndef ParseFile(modpath, inifile, out, indent):\n\tprint(inifile)\n\t\n\tinput = open(os.path.join(modpath, inifile), 'r')\n\tlines = input.readlines()\n\t\n\tcurobject = dict()\n\t\n\tnextcommentmode = False\n\tcommentmode = False\n\t\n\tfor l in lines:\n\t\twriteline = True\n\t\n\t\t#Discard comments\n\t\tcmnts = l.split(\"//\")\n\t\tln = cmnts[0]\n\t\t\n\t\tcmnts = ln.split(\"/*\")\n\t\tln = cmnts[0]\n\t\tif len(cmnts) > 1:\n\t\t\tnextcommentmode = True\n\n\t\tcmnts = ln.split(\"*/\")\n\t\tif len(cmnts) > 1:\n\t\t\tln = cmnts[1]\n\t\t\tnextcommentmode = False\n\t\t\tcommentmode = False\n\n\t\tif not commentmode: \n\t\t\tv = ln.split(\"=\");\n\t\t\tt = list()\n\n\t\t\tfor i in range(0, len(v)):\n\t\t\t\tv[i] = v[i].strip()\n\t\t\t\n\t\t\tif len(v) > 1:\n\t\t\t\tv[0] = v[0].lower()\n\t\t\t\t\n\t\t\t\t#Parse included files\n\t\t\t\tif v[0] == \"includefile\":\n\t\t\t\t\tParseFile(modpath, v[1], out, True)\n\t\t\t\t\twriteline = False\n\n\t\tcommentmode = nextcommentmode\n\t\t\n\t\tif writeline:\n\t\t\tif indent:\n\t\t\t\tout.write(\"\\t\" + l);\n\t\t\telse:\n\t\t\t\tout.write(l);\n\n\tout.write(\"\\n\");\n\tinput.close()\n\nRootFolder = sys.argv[1]\nModName = sys.argv[2]\n\nOutputFile = os.path.join(RootFolder, ModName, \"MergedIndex.ini\");\n\nout = open(OutputFile, 'w')\n\nParseFile(RootFolder, os.path.join(ModName, \"Index.ini\"), out, 
False)\n\t\nout.close()","repo_name":"weegee720/Cortex-Command-Community-Project-Toolbox","sub_path":"Merger/MergeDataModule.py","file_name":"MergeDataModule.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1026799185","text":"\"\"\"\r\nСинтаксис\r\n\"\"\"\r\n\r\n# def foo(x):\r\n# return x \r\n\r\n# foo(123)\r\n\r\n# a = 1\r\n# a = 'one'\r\n\r\n# foo = lambda x: x \r\n# foo = 1\r\n\r\n# foo(123)\r\n\r\n\"\"\"\r\nПримеры использования\r\n\"\"\"\r\n\r\n\"\"\"\r\nФункция, которая не храниться в памяти\r\n\"\"\"\r\n\r\n\r\n\r\n# print((lambda x: x + 1)(10))\r\n\r\n# def plusOne(x):\r\n# return x + 1\r\n\r\n# if plusOne(int(input(': '))) == 11:\r\n# print('ты ввел число 10 а я прибавил к нему один ! и получил 11')\r\n\r\n\"\"\"\r\nФункция, аргумент\r\n\"\"\"\r\n\r\ndef foo(num, func):\r\n a = func(num)\r\n return a\r\n\r\n\r\nIsNumberEven = foo(10, lambda x: x ** 2)\r\n\r\nprint(IsNumberEven)\r\n\r\n","repo_name":"gabrielbodrug/python","sub_path":"func/lamdba.py","file_name":"lamdba.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29085035238","text":"import sys\ninput = sys.stdin.readline\n\ndictionary = {}\nn = int(input())\nfor _ in range(n):\n s = input().rstrip()\n dictionary[s] = len(s)\n\nresult = list(sorted(dictionary.items(),key = lambda item: (item[1],item[0])))\n\nfor i in result:\n print(i[0])","repo_name":"spaceOfSoul/baekjunSolve_python","sub_path":"1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"1366417131","text":"from django.shortcuts import render, get_object_or_404\nfrom customers .models import Customer\nfrom products .models import Product, Category, Colour, Size, Material\nfrom sales .models import Sale, SaleItem\nfrom main .models import Brand\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required\ndef admin_index(request):\n template_name = 'app/index.html'\n context = {\n \"is_admin_index\" : True,\n }\n return render(request,template_name,context)\n\n\ndef customers_view(request):\n customers = Customer.objects.filter(is_active=True) \n template_name = 'app/customers-view.html'\n context = {\n \"is_customers_view\" : True,\n \"customers\" : customers,\n }\n return render(request,template_name,context)\n\n\ndef products_view(request):\n products = Product.objects.filter(is_active=True)\n template_name = 'app/products-view.html'\n context = {\n \"is_products_view\" : True,\n \"products\" : products,\n }\n return render(request,template_name,context)\n\n\ndef sales_list(request):\n sales_list = Sale.objects.filter(is_active=True) \n template_name = 'app/sales-list.html'\n context = {\n \"is_sale_list\" : True,\n \"sales_list\" : sales_list,\n }\n return render((request), template_name, context)\n\ndef category_view(request):\n categories = Category.objects.filter(is_active=True)\n template_name = 'app/category-view.html'\n context = {\n \"is_context_view\" : True,\n \"categories\" : categories,\n }\n return render(request,template_name,context)\n\ndef sale_view(request,pk):\n sale_view = get_object_or_404(Sale,pk=pk)\n template_name = 'app/sales-view.html'\n context = {\n \"is_sale_view\" : True,\n \"sale_view\" : sale_view,\n }\n return render((request), template_name, context)\n\n\ndef 
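_active_list(request, queryset, template_name, flag):\n    # (added illustrative sketch, not part of the original app; the parameters are\n    # hypothetical) every view below repeats one shape: filter the active rows,\n    # build a context with a page flag, render the template.\n    context = {flag: True, \"objects\": queryset.filter(is_active=True)}\n    return render(request, template_name, context)\n\n\ndef 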
brands_view(request):\n brands = Brand.objects.filter(is_active=True)\n template_name = 'app/brands-view.html'\n context = {\n \"is_brands_view\" : True,\n \"brands\" : brands,\n }\n return render(request,template_name,context)\n\n\ndef colors_view(request):\n colors = Colour.objects.filter(is_active=True)\n template_name = 'app/colors-view.html'\n context = {\n \"is_colors_view\" : True,\n \"colors\" : colors,\n }\n return render(request,template_name,context)\n\n\ndef sizes_view(request):\n sizes = Size.objects.filter(is_active=True)\n\n template_name = 'app/size-view.html'\n context = {\n \"is_sizes_view\" : True,\n \"sizes\" : sizes,\n }\n return render(request,template_name,context)\n\n\ndef material_view(request):\n materials = Material.objects.filter(is_active=True)\n\n template_name = 'app/material-view.html'\n context = {\n \"is_material_view\" : True,\n \"materials\" : materials,\n }\n return render(request,template_name,context)\n\n","repo_name":"Ajmal-AJ/ecommerce","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30609420577","text":"import csv\n\n#get user data\ndef retrieve_user_data():\n\tuser_data = []\n\twith open('users.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tuser_data.append(row)\n\tuser_data.pop(0)\n\treturn user_data\n\n#get blockchain data\ndef retrieve_block_data():\n\tblock_data = []\n\twith open('block.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tblock_data.append(row)\n\treturn block_data\n\n#write new user data into record\ndef write_user_data(s):\n\twith open('users.csv', 'w') as csvfile:\n\t\tfilewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tfor i in s:\n\t\t\tfilewriter.writerow(i)\n\n#write new blockchain data into record\ndef write_block_data(s):\n\twith open('block.csv', 'w') as csvfile:\n\t\tfilewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tfor i in s:\n\t\t\tfilewriter.writerow(i)\n\n#clean all the data in the file\ndef cleandata():\n\twith open('users.csv', 'w') as csvfile:\n\t\tfilewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tfilewriter.writerow([])\n","repo_name":"kevinyang372/Odera","sub_path":"create_user.py","file_name":"create_user.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"5743671252","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nprint(\"\\nWelcome to the ultra cool bacterial dynamics simulator \\n\")\n\n# Solving the differential equation\ndef solve_model(rho, S, x_min, x_max, n, D_s, D_b, chi, r, k, lambd, t_c, x_l, q, beta):\n # Defining the step in space and time\n dx = (x_max - x_min)/n\n dt = dx**2 / (64*D_b)\n # print(dt)\n # print(t_c/dt)\n # Defining space\n x = np.linspace(x_min, x_max, n)\n # Array of derivatives\n drhodt = np.empty(n)\n dSdt = np.empty(n)\n # Defining time\n #t = np.arange(0, t_max, dt)\n \n # Array of solutions for density and concentration\n rhos = [rho]\n Ss = [S]\n tot_rho = [np.sum(rho*dx), np.sum(rho*dx)]\n tot_S = [np.sum(S*dx)]\n \n #Dirac delta function\n dirac = np.zeros(n)\n dirac[int(x_l/dx)] = 1\n \n gamma = r/k\n \n # 
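(added note, a sketch) dt = dx**2/(64*D_b) above stays comfortably inside the\n    # explicit-Euler diffusion stability bound dt < dx**2/(2*max(D_b, D_s)).\n    # 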
Loop in time\n i = 0\n while i < 2000000:\n # if i % 1000 == 0:\n # print(f\"{i+1}/2000000. Substance is released in i = {int(t_c/dt)}\", end = \"\\r\")\n if np.abs(tot_rho[-1] - tot_rho[-2]) < 1e-7 and dt*i > 1.1*t_c:\n break\n i += 1\n #for i in range(len(t)):\n # Loop in space\n if dt*i < t_c:\n q_eff = 0\n else:\n q_eff = q\n S[n-1] = S[n-2]\n S[0] = S[1]\n rho[0] = D_b*rho[1]/(chi*(S[1] - S[0]) + D_b)\n rho[n-1] = D_b*rho[n-2]/(chi*(S[n-1] - S[n-2]) + D_b)\n for j in range(1,n-1):\n # Substance diffusion and degradetion\n dSdt[j] = D_s*((S[j+1] - S[j])/dx**2 - (S[j]-S[j-1])/dx**2) - lambd*S[j]*rho[j] + q_eff*dirac[j]\n # Chemotaxis\n chem = chi*(((rho[j+1] - rho[j-1])*(S[j+1] - S[j-1])/(4*dx**2)) + rho[j]*((S[j+1] - 2*S[j] + S[j-1])/dx**2))\n # Growth\n growth = r*rho[j]\n # Competition\n competition = gamma*rho[j]**2\n # Death by consumption\n death = lambd*beta*rho[j]*S[j]\n # Bacterial equation\n drhodt[j] = D_b*((rho[j+1] - rho[j])/dx**2 - (rho[j]-rho[j-1])/dx**2) + chem + growth - competition - death\n rho = rho + drhodt*dt\n S = S + dSdt*dt\n\n rhos.append(rho)\n Ss.append(S)\n tot_rho.append(np.sum(rho*dx))\n tot_S.append(np.sum(S*dx))\n return rhos, Ss, tot_rho, tot_S, i, dx\n\nn = 100\ncenter = 0\nsd = 0.4\nr = 0.05\nk = 0.8\nchi = 0.05\ngamma = r/k\nlambd = 0.03\nq = 0.6\nbeta = 0.5\n\nD_b = 1e-2\nD_s = 5e-2\n#t_final = 2200\nt_c = 200\n\n# Initial condition\nS = np.zeros(n)\nrho = np.random.uniform(0.05, 0.1, n)\n\nx_max = np.arange(1, 30, 0.5)\nq_s = np.arange(0.05, 1, 0.05)\n\ntot_rho_final = np.zeros((len(x_max), len(q_s)))\nn = 100\nfinal_profile_rho = {}\nfinal_profile_S = {}\ntot_rho_profile = {}\n\nprint('\\nReady to run simulations. Varying L and q \\n')\n\n#rhos, Ss, tot_rho, tot_S, idx, dx = solve_model(rho, S, 0, 10, n, D_s, D_b, chi, r, k, lambd, t_c, 5, 0.6, beta)\n\nfor i in range(len(x_max)):\n for j in range(len(q_s)):\n dx = 0.1\n \n n = int(x_max[i]/dx)\n\n print(f\"Run {i+1} of {len(x_max)} - Size: {x_max[i]} - Total steps: {n}\", end = \"\\r\")\n\n S = np.zeros(n)\n\n dt = dx**2 / (64*D_b)\n\n # Create the space\n x = np.arange(0, x_max[i], dx)\n\n # S[50] = 5\n rho = np.random.uniform(0.05, 0.1, n)\n rhos, Ss, tot_rho, tot_S, idx, dx = solve_model(rho, S, 0, x_max[i], n, D_s, D_b, chi, r, k, lambd, t_c, x_max[i]/2, q_s[j], beta)\n rhos = np.array(rhos)\n Ss = np.array(Ss)\n if (rhos < 0).any() == False:\n tot_rho_final[i,j] = tot_rho[-1]\n final_profile_rho[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = rhos[-1]\n final_profile_S[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = Ss[-1]\n tot_rho_profile[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = tot_rho\n else:\n print(f\"There was an error. x_max = {x_max[i]:1f}\")\n tot_rho_final[i,j] = np.nan\n final_profile_rho[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = np.nan\n final_profile_S[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = np.nan\n tot_rho_profile[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = np.nan\n\nprint(\"Simulations done! 
\\n\")\n\ntry:\n pd.DataFrame(final_profile_rho).to_csv('final_rho_profile_varying_L-q.csv')\n pd.DataFrame(final_profile_S).to_csv('final_S_profile_varying_L-q.csv')\n pd.DataFrame(tot_rho_profile).to_csv('temporal_N_profile_varying_L-q.csv')\nexcept:\n print('Could not save files as DataFrames, saving using Pickle instead')\n with open('final_rho_profile_varying_L-q.pkl', \"wb\") as fp:\n pickle.dump(final_profile_rho, fp)\n with open('final_S_profile_varying_L-q.pkl', \"wb\") as fp:\n pickle.dump(final_profile_S, fp)\n with open('temporal_N_profile_varying_L-q.pkl', \"wb\") as fp:\n pickle.dump(tot_rho_profile, fp)\n\nnp.savetxt('final_population_varying_L-q.txt', tot_rho_final)\n\nprint('DONE! :)')\n","repo_name":"henauts/QECO","sub_path":"Codes/bacterial_model_v1_verify_L-q.py","file_name":"bacterial_model_v1_verify_L-q.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28556872500","text":"from flask import Flask, request, jsonify\nimport pandas as pd\nimport requests\nimport os\nfrom os.path import exists\nimport io\nimport dropbox\nimport joblib\n\napp = Flask(__name__)\n\nmodel_path = 'model'\n\ndbx = dropbox.Dropbox('sl.BN0iICVUKpTKadJ3SPGjAgdxDe1LX0C2yILam1-C-7_yB_84NlvwPRt7GHwSK28bmnnVRzgqDLRILx3mX5B3IgaoE59C9geJoLWvZiUXReD6AazCGfbVtyiE9rbeH0Il2dzwo-E')\n\nif not os.path.isdir(model_path):\n\tos.makedirs(model_path)\n\n\ndef get_pipeline(): # Load pipeline\n\tpath = model_path+'/pipeline.joblib'\n\tif not exists(path):\n\t\tprint(path, 'does not exist')\n\t\tfilename = \"/pipeline.joblib\"\n\t\ts, r = dbx.files_download(filename)\n\t\twith open(model_path+'/pipeline.joblib', 'wb') as f:\n\t\t\tf.write(r.content)\n\tpipeline = joblib.load(model_path+'/pipeline.joblib')\n\treturn pipeline\n\n\npipeline = get_pipeline()\n\n\n@app.route('/api/predict', methods=['POST'])\ndef predict():\n\tdata = request.data\n\tio_val = io.StringIO(data.decode('utf-8'))\n\tdf = pd.read_csv(io_val, index_col=[0])\n\treturn jsonify(\n\t\t{'prediction': pipeline.predict_proba(df)[0].tolist()})\n\n\n@app.route(\"/\")\ndef home_view():\n\treturn \"

Hello World!

\"\n","repo_name":"DamienDous/scoringmodelflaskapi","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13687079744","text":"import collections\nimport os\nimport random\n\nfrom .utils import filter_dict_by_iqr\n\n\nBASE_DIR = os.path.dirname(__file__)\n\n\nclass Trimmer:\n def __init__(self, path):\n self._datasets_path = path\n self._females = collections.defaultdict(int)\n self._males = collections.defaultdict(int)\n self._load_users_stats()\n\n def remove_outliers(self):\n \"\"\"\n Remove outlier users (users with too many or few tweets).\n \"\"\"\n print('Removing outliers')\n self._females = filter_dict_by_iqr(self._females)\n self._males = filter_dict_by_iqr(self._males)\n\n def split_datasets(self, training_set_percentage):\n \"\"\"\n Splits both datasets (females.tsv and males.tsv) into training and test datasets.\n\n Params:\n training_set_percentage (float): Percentage of tweets that should go to training dataset.\n \"\"\"\n print('Trimming datasets')\n self._split_dataset(self._females, os.path.join(self._datasets_path, 'females.tsv'), training_set_percentage)\n self._split_dataset(self._males, os.path.join(self._datasets_path, 'males.tsv'), training_set_percentage)\n os.remove(os.path.join(self._datasets_path, 'females.tsv'))\n os.remove(os.path.join(self._datasets_path, 'males.tsv'))\n\n def _load_users_stats(self):\n \"\"\"\n Load user stats from generated datasets.\n \"\"\"\n with open(os.path.join(self._datasets_path, 'females.tsv'), 'r', encoding='utf-8') as file:\n for line in file:\n self._females[line.split('\\t')[0]] += 1\n\n with open(os.path.join(self._datasets_path, 'males.tsv'), 'r', encoding='utf-8') as file:\n for line in file:\n self._males[line.split('\\t')[0]] += 1\n\n def _split_dataset(self, users, dataset, training_set_percentage):\n \"\"\"\n Split a dataset into training and test datasets.\n\n Params:\n users (dict): Valid users for the dataset with tweets count as values.\n dataset (string): Path to dataset to split.\n training_set_percentage (float): Percentage of tweets that should go to training dataset.\n \"\"\"\n total_tweets = sum(list(users.values()))\n training_indexes = set(random.sample(range(total_tweets), int(total_tweets * training_set_percentage)))\n\n valid_tweets_processed = 0\n with open(dataset, 'r', encoding='utf-8') as file,\\\n open(dataset.replace('.tsv', '-training.tsv'), 'w+', encoding='utf-8') as training_file,\\\n open(dataset.replace('.tsv', '-test.tsv'), 'w+', encoding='utf-8') as test_file:\n for line in file:\n if line.split('\\t')[0] in users:\n if valid_tweets_processed in training_indexes:\n training_file.write(line)\n else:\n test_file.write(line)\n valid_tweets_processed += 1","repo_name":"davidmogar/lexgen","sub_path":"lexgen/trimmer.py","file_name":"trimmer.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42978823300","text":"# 作 者 :王建设\n# 开发时间 :2022/8/9 21:01\nimport os\nfilename = 'student.txt'\ndef main():\n while True:\n menu()\n choice = int(input('请选择'))\n if choice in range(8):\n if choice ==0:\n answer = input('您确定要退出系统吗?y/n')\n if answer=='y' or answer =='Y':\n print('谢谢您的使用!!!')\n break\n else:\n continue\n elif choice == 1:\n insert()\n elif choice == 2:\n search()\n elif choice == 3:\n delete()\n elif choice ==4:\n modify()\n elif choice ==5:\n sort()\n 
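# (added note, a sketch) records are stored as one dict literal per line and\n            # read back with dict(eval(item)); json.dumps/json.loads would be the safer\n            # serialization idiom.\n            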
elif choice ==6:\n tital()\n elif choice == 7:\n show()\n\ndef menu():\n print(\"==================学生管理系统=================\")\n print('--------------------功能菜单-----------------')\n print('\\t\\t\\t\\t\\t\\t1.录入学生信息')\n print('\\t\\t\\t\\t\\t\\t2.查找学生信息')\n print('\\t\\t\\t\\t\\t\\t3.删除学生信息')\n print('\\t\\t\\t\\t\\t\\t4.修改学生信息')\n print('\\t\\t\\t\\t\\t\\t5.排序')\n print('\\t\\t\\t\\t\\t\\t6.统计学生总人数')\n print('\\t\\t\\t\\t\\t\\t7.显示所有学生信息')\n print('\\t\\t\\t\\t\\t\\t0.退出')\n print('------------------------------------------')\n\ndef insert():\n student_list=[]\n while True:\n id = input('请输入ID(如1001):')\n if not id:\n break\n name = input('请输入姓名')\n if not name:\n break\n\n try:\n english = int(input('请输入英语成绩'))\n python = int(input(\"请输入python成绩\"))\n java = int(input('请输入Java成绩'))\n except:\n print(\"输入无效,请重新输入\")\n continue\n\n #将录入的学生信息保存到字典中\n student = {'id':id,'name':name,'english':english,'python':python,'java':java}\n #将学生信息添加到列表中\n student_list.append(student)\n anwser = input('是否继续添加?y/n\\n')\n if anwser == 'y' or anwser =='Y':\n continue\n else:\n break\n\n #调用save()函数\n save(student_list)\n print('学生信息录入完毕!!!')\n\ndef save(lst):\n try:\n stu_txt = open(filename,'a',encoding ='utf-8')\n except:\n stu_txt = open(filename,'w',encoding ='utf-8')\n for item in lst:\n stu_txt.write(str(item)+'\\n')\n\ndef search():\n student_query =[]\n while True:\n id = ''\n name = ''\n if os.path.exists(filename):\n mode = input('按id查找请输入1,按姓名查找请输入2:')\n if mode == '1':\n id = input('请输入学生id')\n elif mode=='2':\n name = input('请输入学生姓名')\n else:\n print('您输入的信息有误,请重新输入')\n search()\n with open(filename,'r',encoding='utf-8') as rfile:\n student = rfile.readlines()\n for item in student:\n d = dict(eval(item))\n if id !='':\n if d['id'] == id:\n student_query.append(d)\n elif name!='':\n if d['name'] ==name:\n student_query.append(d)\n #显示查询结果\n show_student(student_query)\n #清空列表\n student_query.clear()\n anwser = input('是否继续查询?y/n\\n')\n if anwser == 'y' or anwser == 'Y':\n continue\n else:\n break\n else:\n print('暂未保存学生信息')\n return\ndef show_student(lst):\n if len(lst) == 0:\n print('没有查询到学生信息,无数据显示')\n return\n #定义标题显示格式\n format_title = '{:^6}\\t{:^12}\\t{:^8}\\t{:^10}\\t{:^10}\\t{:^8}'\n print(format_title.format('id','姓名','英语成绩','python成绩','Java成绩','总成绩'))\n #定义内容的显示格式\n format_data = '{:^6}\\t{:^12}\\t{:^8}\\t{:^10}\\t{:^10}\\t{:^8}'\n for item in lst:\n print(format_data.format(item.get('id'),\n item.get('name'),\n item.get('english'),\n item.get('python'),\n item.get('java'),\n int(item.get('english'))+int(item.get('python'))+int(item.get('java'))\n ))\n\n\ndef delete():\n while True:\n student_id = input('请输入要删除的学生id:')\n if student_id !='':\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as file:\n student_old = file.readlines()\n else:\n student_old = []\n flag = False #标记是否删除\n if student_old:\n with open(filename ,'w',encoding='utf-8') as wfile:\n d={}\n for item in student_old:\n d= dict(eval(item))\n if d['id']!=student_id:\n wfile.write(str(d)+'\\n')\n else:\n flag = True\n if flag:\n print(f'id为{student_id}的学生信息已被删除')\n else:\n print(f'没有找到id为{student_id}的学生信息')\n else:\n print('无学生信息')\n break\n show() #删除之后要重新显示所有学生信息\n answer = input('是否继续删除?y/n\\n')\n if answer == 'y' or answer == 'Y':\n continue\n else:\n break\n\ndef modify():\n show()\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n student_old = rfile.readlines()\n else:\n return\n student_id = input('请输入要修改的学生id')\n with open(filename,'w',encoding='utf8') as 
wfile:\n for item in student_old:\n d=dict(eval(item))\n if d['id'] == student_id:\n print(\"找到学生信息,可以修改他的相关信息了!\")\n while True:\n try:\n d['name'] = input('请输入姓名:')\n d['english'] = input('请输入英语成绩:')\n d['python'] = input('请输入python成绩:')\n d['java'] = input('请输入java成绩:')\n except:\n print('您的输入有误!!!')\n else:\n break\n wfile.write(str(d)+'\\n')\n print('修改成功!!!')\n else:\n wfile.write(str(d)+'\\n')\n answer = input('是否继续修改?y/n\\n')\n if answer == 'y' or answer == 'Y':\n modify()\n\ndef sort():\n show()\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n student_list = rfile.readlines()\n student_new = []\n for item in student_list:\n d = dict(eval(item))\n student_new.append(d)\n else:\n return\n asc_or_desc = input('请选择(0.升序 1.降序)')\n if asc_or_desc == '0':\n asc_or_desc_bool =False\n elif asc_or_desc == '1':\n asc_or_desc_bool = True\n else:\n print('您输入的有误,请重新输入')\n sort()\n mode = input('请选择排序方法:1.按英语成绩排序;2.按python成绩排序;3.按java成绩排序 0.按总成绩排序')\n if mode == '1':\n student_new.sort(key=lambda x :int(x['english']),reverse = asc_or_desc_bool)\n elif mode == '2':\n student_new.sort(key=lambda x :int(x['python']),reverse = asc_or_desc_bool)\n elif mode =='3':\n student_new.sort(key=lambda x :int(x['java']),reverse = asc_or_desc_bool)\n elif mode == '0':\n student_new.sort(key=lambda x :int(x['english'])+int(x['python'])+int(x['java']),reverse = asc_or_desc_bool)\n else:\n print('您的输入有误,请重新输入!!!')\n sort()\n show_student(student_new)\ndef tital():\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n students = rfile.readlines()\n if students:\n print(f'一共有{len(students)}名学生')\n else:\n print('还没有录入学生信息')\n else:\n print('暂未保存数据信息')\n\ndef show():\n student_list = []\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n students = rfile.readlines()\n for item in students:\n student_list.append(eval(item))\n if student_list:\n show_student(student_list)\n else:\n print('暂未保存数据信息!!!')\n\nmain()\n","repo_name":"Jianjianw6/python_studentsysytem","sub_path":"studentsystem/stusystem.py","file_name":"stusystem.py","file_ext":"py","file_size_in_byte":9214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"671641169","text":"import socket\nfrom message import Message\n\n\nclass ThrustMQProducer:\n\n def __init__(self, host=\"localhost\", port=1888):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((host, port))\n\n def send_message(self, message):\n self.sock.sendall(message.bucket_id.to_bytes(8, byteorder='little'))\n self.sock.sendall(message.length.to_bytes(4, byteorder='little'))\n self.sock.sendall(message.data)\n\n def send(self, messages):\n if not isinstance(messages, list):\n messages = [messages]\n\n batch_size = len(messages)\n\n self.sock.sendall(batch_size.to_bytes(4, byteorder='little'))\n\n for message in messages:\n self.send_message(message)\n\n result = self.sock.recv(message.length)\n","repo_name":"rambler-digital-solutions/thrustmq","sub_path":"clients/python/thrustmq/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"16608721026","text":"from adoctor_cli.base_cmd import BaseCommand\nfrom aops_utils.restful.helper import make_check_url\nfrom aops_utils.conf.constant import CHECK_GET_RESULT\nfrom aops_utils.time_utils import time_transfer\nfrom 
aops_utils.validate import name_check, str_split\nfrom aops_utils.cli_utils import add_page, add_access_token, add_query_args\nfrom aops_utils.cli_utils import add_start_and_end, request_without_print, pretty_json\n\n\nclass CheckCommand(BaseCommand):\n \"\"\"\n Description: start the check part\n Attributes:\n sub_parse: Subcommand parameters\n params: Command line parameters\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Description: Instance initialization\n \"\"\"\n super().__init__()\n self.add_subcommand(sub_command='check',\n help_desc=\"check operations\")\n self.sub_parse.add_argument(\n '--host_list',\n nargs='?',\n type=str,\n help='host ips')\n\n self.sub_parse.add_argument(\n '--check_items',\n nargs='?',\n type=str,\n help='ckeck items')\n\n add_start_and_end(self.sub_parse)\n add_access_token(self.sub_parse)\n add_query_args(self.sub_parse, ['check_item', 'start', 'end'])\n add_page(self.sub_parse)\n\n def do_command(self, params):\n \"\"\"\n Description: Executing command\n Args:\n params: Command line parameters\n \"\"\"\n self.manage_requests_check(params)\n\n @staticmethod\n def manage_requests_check(params):\n \"\"\"\n Description: Executing check command\n Args:\n params: Command line parameters\n Returns:\n dict: body of response\n \"\"\"\n\n hosts = str_split(params.host_list)\n checks = str_split(params.check_items)\n name_check(hosts)\n name_check(checks)\n time_list = time_transfer(params.start, params.end)\n\n pyload = {\n \"time_range\": [time_list[0], time_list[1]],\n \"check_items\": checks,\n \"host_list\": hosts,\n \"page\": params.page,\n \"per_page\": params.per_page\n }\n if params.sort is not None:\n pyload['sort'] = params.sort\n pyload['direction'] = params.direction\n check_url, header = make_check_url(CHECK_GET_RESULT)\n result = request_without_print('POST', check_url, pyload, header, params.access_token)\n print(pretty_json(result))\n","repo_name":"openeuler-mirror/A-Ops","sub_path":"adoctor-cli/adoctor_cli/commands/check_cmd.py","file_name":"check_cmd.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"25136034471","text":"# imports\nimport torch\nfrom transformers import AutoTokenizer\nfrom trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model\nfrom trl.core import respond_to_batch\n\n# get models\nmodel = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')\nmodel_ref = create_reference_model(model)\n\ntokenizer = AutoTokenizer.from_pretrained('gpt2')\ntokenizer.pad_token = tokenizer.eos_token\n\n# initialize trainer\nppo_config = PPOConfig(\n batch_size=1,\n)\n\n# encode a query\nquery_txt = \"This morning I went to the \"\nquery_tensor = tokenizer.encode(query_txt, return_tensors=\"pt\")\n\n# get model response\nresponse_tensor = respond_to_batch(model, query_tensor)\n\n# create a ppo trainer\nppo_trainer = PPOTrainer(ppo_config, model, model_ref, tokenizer)\n\n# define a reward for response\n# (this could be any reward such as human feedback or output from another model)\nreward = [torch.tensor(1.0)]\n\n# train model for one step with ppo\ntrain_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward)\n\nfrom pprint import 
pprint\npprint(train_stats)\n\n\n","repo_name":"corradomio/python_projects","sub_path":"check_hf_llma/check_trl.py","file_name":"check_trl.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"8155116266","text":"import re\nimport hashlib\nimport inspect\nimport collections\n\nfrom base64 import b64decode\n\nimport idiokit\nfrom idiokit.xmlcore import Element, Elements\n\n\ndef _replace_non_xml_chars(unicode_obj, replacement=u\"\\ufffd\"):\n return _NON_XML.sub(replacement, unicode_obj)\n\n\n_NON_XML = re.compile(u\"[\\x00-\\x08\\x0b\\x0c\\x0e-\\x1f\\ud800-\\udfff\\ufffe\\uffff]\", re.U)\n\n\ndef _normalize(value):\n \"\"\"Return the value converted to unicode. Raise a TypeError if the\n value is not a string.\n\n >>> _normalize(\"a\")\n u'a'\n >>> _normalize(u\"b\")\n u'b'\n >>> _normalize(1)\n Traceback (most recent call last):\n ...\n TypeError: expected a string value, got the value 1 of type int\n\n When converting str objects the default encoding is tried, and an\n UnicodeDecodeError is raised if the value can not bot converted.\n\n >>> _normalize(\"\\\\xe4\")\n Traceback (most recent call last):\n ...\n UnicodeDecodeError: ...\n \"\"\"\n\n if isinstance(value, basestring):\n return unicode(value)\n\n name = type(value).__name__\n module = inspect.getmodule(value)\n if module is not None and module.__name__ != \"__builtin__\":\n name = module.__name__ + \".\" + name\n msg = \"expected a string value, got the value %r of type %s\" % (value, name)\n raise TypeError(msg)\n\n\nEVENT_NS = \"abusehelper#event\"\n\n\ndef _unicode_quote(string):\n r\"\"\"\n >>> _unicode_quote(u\"a\")\n u'a'\n >>> _unicode_quote(u\"=\")\n u'\"=\"'\n >>> _unicode_quote(u\"\\n\")\n u'\"\\n\"'\n \"\"\"\n\n if _UNICODE_QUOTE_CHECK.search(string):\n return u'\"' + _UNICODE_QUOTE.sub(r'\\\\\\g<0>', string) + u'\"'\n return string\n\n\n_UNICODE_QUOTE_CHECK = re.compile(r'[\\s\"\\\\,=]', re.U)\n_UNICODE_QUOTE = re.compile(r'[\"\\\\]', re.U)\n\n\ndef _unicode_parse_part(string, start):\n match = _UNICODE_PART.match(string, start)\n quoted, unquoted = match.groups()\n end = match.end()\n\n if quoted is not None:\n return _UNICODE_UNQUOTE.sub(\"\\\\1\", quoted), end\n if unquoted is not None:\n return unquoted, end\n return u\"\", end\n\n\n_UNICODE_UNQUOTE = re.compile(r'\\\\(.)', re.U)\n_UNICODE_PART = re.compile(r'\\s*(?:(?:\"((?:\\\\.|[^\"])*)\")|([^\\s\"=,]+)|)\\s*', re.U)\n\n\nclass Event(object):\n __slots__ = [\"_attrs\"]\n\n _UNDEFINED = object()\n\n @classmethod\n def _itemize(cls, *args, **keys):\n result = dict()\n\n for obj in args + (keys,):\n if type(obj) == Event:\n for key, values in obj._attrs.iteritems():\n if key not in result:\n result[key] = values.copy()\n else:\n result[key].update(values)\n continue\n\n if hasattr(obj, \"iteritems\"):\n obj = obj.iteritems()\n elif hasattr(obj, \"items\"):\n obj = obj.items()\n\n for key, values in obj:\n if isinstance(values, basestring):\n values = (_normalize(values),)\n else:\n values = (_normalize(x) for x in values)\n\n key = _normalize(key)\n if key not in result:\n result[key] = set(values)\n else:\n result[key].update(values)\n\n return result\n\n @classmethod\n def from_unicode(cls, string):\n r\"\"\"\n >>> event = Event({\"a\": \"b\"})\n >>> Event.from_unicode(unicode(event)) == event\n True\n\n >>> event = event.union({u'=': u'\"'})\n >>> Event.from_unicode(unicode(event)) == event\n True\n\n Regression test: Check that character escaping\n doesn't 
mess up parsing.\n\n >>> event = Event({\n ... u\"x\": u\"\\\\\",\n ... u\"y\": u\"b\"\n ... })\n >>> Event.from_unicode(ur'x=\"\\\\\", \"y\"=b') == event\n True\n \"\"\"\n\n string = string.strip()\n if not string:\n return cls()\n\n attrs = collections.defaultdict(list)\n\n index = 0\n length = len(string)\n while True:\n key, index = _unicode_parse_part(string, index)\n if index >= length:\n raise ValueError(\"unexpected string end\")\n if string[index] != u\"=\":\n raise ValueError(\"unexpected character %r at index %d\" %\n (string[index], index))\n index += 1\n\n value, index = _unicode_parse_part(string, index)\n attrs[key].append(value)\n\n if index >= length:\n return cls(attrs)\n\n if string[index] != u\",\":\n raise ValueError(\"unexpected character %r at index %d\" %\n (string[index], index))\n index += 1\n\n @classmethod\n def from_elements(self, elements):\n \"\"\"Yield events parsed from XML element(s).\n\n >>> element = Element(\"message\")\n >>> list(Event.from_elements(element))\n []\n >>> element.add(Element(\"event\", xmlns=EVENT_NS))\n >>> list(Event.from_elements(element)) == [Event()]\n True\n\n >>> event = Event({u\"\\\\uffff\": u\"\\\\x05\"}) # include some forbidden XML chars\n >>> element = Element(\"message\")\n >>> element.add(event.to_elements())\n >>> list(Event.from_elements(element)) == [Event({u\"\\\\ufffd\": u\"\\\\ufffd\"})]\n True\n \"\"\"\n\n # Future event format\n for event_element in elements.children(\"e\", EVENT_NS):\n attrs = collections.defaultdict(list)\n for key_element in event_element.children(\"k\").with_attrs(\"a\"):\n key = b64decode(key_element.get_attr(\"a\")).decode(\"utf-8\")\n for value_element in key_element.children(\"v\").with_attrs(\"a\"):\n value = b64decode(value_element.get_attr(\"a\")).decode(\"utf-8\")\n attrs[key].append(value)\n yield Event(attrs)\n\n # Legacy event format\n for event_element in elements.children(\"event\", EVENT_NS):\n attrs = collections.defaultdict(list)\n for attr in event_element.children(\"attr\").with_attrs(\"key\", \"value\"):\n key = attr.get_attr(\"key\")\n value = attr.get_attr(\"value\")\n attrs[key].append(value)\n yield Event(attrs)\n\n def __init__(self, *args, **keys):\n \"\"\"\n Regression test: Keep the the correct internal encoding in the\n copy/merge constructor.\n\n >>> event = Event({u\"\\xe4\": u\"\\xe4\"})\n >>> Event(event).items()\n ((u'\\\\xe4', u'\\\\xe4'),)\n \"\"\"\n\n self._attrs = self._itemize(*args, **keys)\n\n def union(self, *args, **keys):\n \"\"\"Return a new event that contains all key-value pairs from\n appearing in the original event and/or Event(*args, **keys).\n\n >>> sorted(Event(a=[\"1\", \"2\"]).union(a=[\"1\", \"3\"]).items())\n [(u'a', u'1'), (u'a', u'2'), (u'a', u'3')]\n \"\"\"\n\n return type(self)(self, *args, **keys)\n\n def difference(self, *args, **keys):\n \"\"\"Return a new event that contains all key-value pairs\n from the original event except those also appearing in\n Event(*args, **keys).\n\n >>> sorted(Event(a=[\"1\", \"2\"]).difference(a=[\"1\", \"3\"]).items())\n [(u'a', u'2')]\n \"\"\"\n\n other = self._itemize(*args, **keys)\n result = dict()\n for key, values in self._attrs.iteritems():\n diff = values.difference(other.get(key, ()))\n if diff:\n result[key] = diff\n return type(self)(result)\n\n def add(self, key, value, *values):\n \"\"\"Add value(s) for a key.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\")\n >>> event.values(\"key\")\n (u'1',)\n\n More than one value can be added with one call.\n\n >>> event = Event()\n >>> 
event.add(\"key\", \"1\", \"2\")\n >>> sorted(event.values(\"key\"))\n [u'1', u'2']\n\n Key-value pairs is already contained by the event are ignored.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\")\n >>> event.values(\"key\")\n (u'1',)\n >>> event.add(\"key\", \"1\")\n >>> event.values(\"key\")\n (u'1',)\n \"\"\"\n\n self.update(key, (value,) + values)\n\n def update(self, key, values):\n \"\"\"Update the values of a key.\n\n >>> event = Event()\n >>> event.update(\"key\", [\"1\", \"2\"])\n >>> sorted(event.values(\"key\"))\n [u'1', u'2']\n\n The event will not be modified if there are no values to add.\n\n >>> event = Event()\n >>> event.update(\"key\", [])\n >>> event.contains(\"key\")\n False\n \"\"\"\n\n key = _normalize(key)\n if key not in self._attrs:\n self._attrs[key] = set()\n self._attrs[key].update(_normalize(value) for value in values)\n\n def discard(self, key, value, *values):\n \"\"\"Discard some value(s) of a key.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\", \"2\", \"3\")\n >>> event.discard(\"key\", \"1\", \"3\")\n >>> event.values(\"key\")\n (u'2',)\n\n Values that don't exist for the given key are silently ignored.\n\n >>> event = Event()\n >>> event.add(\"key\", \"2\")\n >>> event.discard(\"key\", \"1\", \"2\")\n >>> event.values(\"key\")\n ()\n \"\"\"\n\n key = _normalize(key)\n if key not in self._attrs:\n return\n valueset = self._attrs[key]\n valueset.difference_update(_normalize(value) for value in (value,) + values)\n if not valueset:\n del self._attrs[key]\n\n def clear(self, key):\n \"\"\"Clear all values of a key.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\")\n >>> event.clear(\"key\")\n >>> event.contains(\"key\")\n False\n\n Clearing keys that do not exist does nothing.\n\n >>> event = Event()\n >>> event.clear(\"key\")\n \"\"\"\n\n key = _normalize(key)\n self._attrs.pop(key, None)\n\n def _unkeyed(self):\n for values in self._attrs.itervalues():\n for value in values:\n yield value\n\n def _iter(self, key, parser, filter):\n if key is self._UNDEFINED:\n values = set(self._unkeyed())\n else:\n key = _normalize(key)\n values = self._attrs.get(key, ())\n\n if parser is not None:\n parsed = (parser(x) for x in values)\n\n if filter is not None:\n return (x for x in parsed if filter(x))\n else:\n return (x for x in parsed if x is not None)\n\n if filter is not None:\n return (x for x in values if filter(x))\n\n return values\n\n def pop(self, key, parser=None, filter=None):\n \"\"\"Pop value(s) of a key and clear them.\n >>> event = Event()\n >>> event.add(\"key\", \"y\", \"x\", \"1.2.3.4\")\n >>> sorted(event.pop(\"key\"))\n [u'1.2.3.4', u'x', u'y']\n >>> event.contains(\"key\")\n False\n\n Perform parsing, validation and filtering by passing in\n parsing and filtering functions. Only values that match\n are cleared from the event. Values that do not match\n are preserved.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... 
return None\n >>> event = Event()\n >>> event.add(\"key\", \"1\", \"a\")\n >>> sorted(event.pop(\"key\", parser=int_parse))\n [1]\n >>> sorted(event.values(\"key\"))\n [u'a']\n \"\"\"\n\n key = _normalize(key)\n values = tuple(self._attrs.get(key, ()))\n\n if parser is not None:\n parsed = ((parser(x), x) for x in values)\n else:\n parsed = ((x, x) for x in values)\n\n if filter is not None:\n filtered = ((x, y) for (x, y) in parsed if filter(x))\n else:\n filtered = ((x, y) for (x, y) in parsed if x is not None)\n\n results = []\n\n for x, y in filtered:\n self.discard(key, y)\n results.append(x)\n\n return tuple(results)\n\n def values(self, key=_UNDEFINED, parser=None, filter=None):\n \"\"\"Return a tuple of event values (for a specific key, if\n given).\n\n >>> event = Event(key=[\"1\", \"2\"], other=[\"3\", \"4\"])\n >>> sorted(event.values())\n [u'1', u'2', u'3', u'4']\n >>> sorted(event.values(\"key\"))\n [u'1', u'2']\n\n Perform parsing, validation and filtering by passing in\n parsing and filtering functions (by default all None objects\n are filtered when a parsing function has been given).\n\n >>> import socket\n >>> def ipv4(string):\n ... try:\n ... return socket.inet_ntoa(socket.inet_aton(string))\n ... except socket.error:\n ... return None\n >>> event = Event(key=[\"1.2.3.4\", \"abba\"], other=\"10.10.10.10\")\n >>> event.values(\"key\", parser=ipv4)\n ('1.2.3.4',)\n >>> sorted(event.values(parser=ipv4))\n ['1.2.3.4', '10.10.10.10']\n \"\"\"\n\n return tuple(self._iter(key, parser, filter))\n\n def value(self, key=_UNDEFINED, default=_UNDEFINED,\n parser=None, filter=None):\n \"\"\"Return one event value (for a specific key, if given).\n\n The value can be picked either from the values of some\n specific key or amongst event values.\n\n >>> event = Event(key=\"1\", other=\"2\")\n >>> event.value(\"key\")\n u'1'\n >>> event.value() in [u\"1\", u\"2\"]\n True\n\n A default return value can be defined in case no suitable\n value is available:\n\n >>> event = Event()\n >>> event.value(\"key\", \"default value\")\n 'default value'\n >>> event.value(default=\"default value\")\n 'default value'\n\n KeyError is raised if no suitable values are available and no\n default is given.\n\n >>> event = Event()\n >>> event.value()\n Traceback (most recent call last):\n ...\n KeyError: 'no value available'\n >>> event.value(\"somekey\")\n Traceback (most recent call last):\n ...\n KeyError: 'somekey'\n\n As with .values(...), parsing and filtering functions can be\n given, and they will be used to modify the results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... 
return None\n >>> event = Event(key=[\"1\", \"a\"])\n >>> event.value(parser=int_parse)\n 1\n >>> event.value(\"key\", parser=int_parse)\n 1\n >>> event.value(\"other\", parser=int_parse)\n Traceback (most recent call last):\n ...\n KeyError: 'other'\n \"\"\"\n\n for value in self._iter(key, parser, filter):\n return value\n\n if default is self._UNDEFINED:\n if key is self._UNDEFINED:\n raise KeyError(\"no value available\")\n raise KeyError(key)\n return default\n\n def contains(self, key=_UNDEFINED, value=_UNDEFINED,\n parser=None, filter=None):\n \"\"\"Return whether the event contains a key-value pair (for\n specific key and/or value, if given).\n\n >>> event = Event()\n >>> event.contains() # Does the event contain any values at all?\n False\n\n >>> event = event.union(key=\"1\")\n >>> event.contains()\n True\n >>> event.contains(\"key\") # Any value for key \"key\"?\n True\n >>> event.contains(value=\"1\") # Value \"1\" for any key?\n True\n >>> event.contains(\"key\", \"1\") # Value \"1\" for key \"key\"?\n True\n >>> event.contains(\"other\", \"2\") # Value \"2\" for key \"other\"?\n False\n\n Parsing and filtering functions can be given to modify the results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... return None\n >>> event.contains(parser=int_parse) # Any int value for any key?\n True\n >>> event.contains(\"key\", parser=int_parse) # Any int value for \"key\"?\n True\n >>> event.contains(value=1, parser=int_parse) # Value 1 for any key?\n True\n >>> event = event.union(other=\"x\")\n >>> event.contains(\"other\", parser=int_parse)\n False\n \"\"\"\n\n if key is self._UNDEFINED:\n values = set(self._unkeyed())\n else:\n key = _normalize(key)\n values = self._attrs.get(key, ())\n\n if parser is not None:\n parsed = (parser(x) for x in values)\n\n if filter is not None:\n filtered = (x for x in parsed if filter(x))\n else:\n filtered = (x for x in parsed if x is not None)\n elif filter is not None:\n filtered = (x for x in values if filter(x))\n else:\n filtered = values\n\n for filtered_value in filtered:\n if value is self._UNDEFINED or value == filtered_value:\n return True\n return False\n\n def items(self, parser=None, filter=None):\n \"\"\"Return a tuple of key-value pairs contained by the event.\n\n >>> event = Event()\n >>> event.items()\n ()\n >>> event = event.union(key=\"1\", other=[\"x\", \"y\"])\n >>> sorted(event.items())\n [(u'key', u'1'), (u'other', u'x'), (u'other', u'y')]\n\n Parsing and filtering functions can be given to modify the results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... return None\n >>> event.items(parser=int_parse)\n ((u'key', 1),)\n\n The order of the key-value pairs is undefined.\n \"\"\"\n\n result = list()\n\n for key, values in self._attrs.iteritems():\n for value in values:\n if parser is not None:\n value = parser(value)\n if filter is not None and not filter(value):\n continue\n if filter is None and value is None:\n continue\n result.append((key, value))\n\n return tuple(result)\n\n def keys(self, parser=None, filter=None):\n \"\"\"Return a tuple of keys with at least one value.\n\n >>> event = Event()\n >>> event.keys()\n ()\n >>> event = event.union(key=\"1\", other=[\"x\", \"y\"])\n >>> sorted(event.keys())\n [u'key', u'other']\n\n Parsing and filtering functions can be given to modify the\n results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... 
return None\n >>> sorted(event.keys(parser=int_parse))\n [u'key']\n \"\"\"\n\n return tuple(key for key in self._attrs\n if self.contains(key, parser=parser, filter=filter))\n\n def to_elements(self, include_body=True):\n element = Element(\"event\", xmlns=EVENT_NS)\n\n for key, value in self.items():\n key = _replace_non_xml_chars(key)\n value = _replace_non_xml_chars(value)\n attr = Element(\"attr\", key=key, value=value)\n element.add(attr)\n\n if not include_body:\n return element\n\n body = Element(\"body\")\n body.text = _replace_non_xml_chars(unicode(self))\n return Elements(body, element)\n\n def __reduce__(self):\n return self.__class__, (self._attrs,)\n\n def __eq__(self, other):\n if not isinstance(other, Event):\n return NotImplemented\n return other._attrs == self._attrs\n\n def __ne__(self, other):\n value = self.__eq__(other)\n if value is NotImplemented:\n return NotImplemented\n return not value\n\n def __unicode__(self):\n \"\"\"Return an unicode representation of the event.\n\n >>> unicode(Event())\n u''\n >>> unicode(Event({\"a,\": \"b\"}))\n u'\"a,\"=b'\n\n The specific order of the key-value pairs is undefined.\n \"\"\"\n\n return u\", \".join(_unicode_quote(key) + u\"=\" + _unicode_quote(value)\n for (key, value) in self.items())\n\n def __repr__(self):\n attrs = dict()\n for key, value in self.items():\n attrs.setdefault(key, list()).append(value)\n return self.__class__.__name__ + \"(\" + repr(attrs) + \")\"\n\n\ndef hexdigest(event, func=hashlib.sha1):\n \"\"\"Return a hexadecimal digest string created by from the given event's\n key-value pairs.\n\n The result is guaranteed to be the same for two events e1 and e2 when\n e1 == e2. Key-value insertion order does not affect the result.\n\n >>> e1 = Event()\n >>> e1.add(\"a\", \"b\")\n >>> e1.add(\"x\", \"y\")\n >>>\n >>> e2 = Event()\n >>> e2.add(\"x\", \"y\")\n >>> e2.add(\"a\", \"b\")\n >>>\n >>> hexdigest(e1) == hexdigest(e2)\n True\n\n The result is not guaranteed to be different for two events e1 and e2\n when e1 != e2. However such a collision is usually exceedingly unlikely\n when a good hashing algorithm is used. SHA1 is the default, but can be\n changed by passing in an algorithm implementation with a compatible\n interface. For example, algorithms defined in the standard 'hashlib'\n library are compatible.\n\n >>> import hashlib\n >>> hexdigest(Event(a=\"b\"), hashlib.md5)\n '51a8ca876645d37e29419694f6396fbc'\n\n The default hashing algorithm is NOT guaranteed to be SHA1 forever. If you\n want to guarantee that the hexdigest is always created using e.g. 
SHA1,\n pass the hash function explicitly as the second parameter:\n\n >>> import hashlib\n >>> hexdigest(Event(a=\"b\"), hashlib.sha1)\n 'edf6294fc1d3f9fe8be4a2d5626788bcfde05e62'\n \"\"\"\n\n result = func()\n\n for key, value in sorted(event.items()):\n result.update(key.encode(\"utf-8\"))\n result.update(\"\\xc0\")\n result.update(value.encode(\"utf-8\"))\n result.update(\"\\xc0\")\n\n return result.hexdigest()\n\n\ndef stanzas_to_events():\n return idiokit.map(Event.from_elements)\n\n\ndef events_to_elements():\n return idiokit.map(lambda x: (x.to_elements(),))\n","repo_name":"abusesa/abusehelper","sub_path":"abusehelper/core/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":22236,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"77"} +{"seq_id":"21900236162","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport TestModel\npretrained_model = \"lenet_mnist_model.pth\"\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n# 初始化输入数据并赋值\ndef getInitModel():\n model = Net()\n model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))\n model.eval()\n print(model)\n return model","repo_name":"tokisamu/AdversarialExampleGennerator","sub_path":"TestModel.py","file_name":"TestModel.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73285180727","text":"# Classes to handle asynchronous downloads\n\nfrom threading import Thread\n\nfrom requests import exceptions, get\n\n\nclass DownloadCommand:\n def __init__(self, url, callback, *args, **kwargs):\n self.callback = callback\n self.args = args\n self.kwargs = kwargs\n self.timeout = 5\n self.timestamp = None\n self.error = None\n self.url = url\n self.response = None\n\n\nclass AsyncDownloadService:\n def execute(self, command, response_handler):\n def _callback_with_args(response, **kwargs):\n command.response = response\n response_handler(command)\n\n kwargs = {\"command\": command, \"callback\": _callback_with_args}\n\n thread = Thread(target=AsyncDownloadService.download, kwargs=kwargs)\n thread.start()\n\n @staticmethod\n def download(command, callback):\n kwargs = {\"timeout\": command.timeout, \"hooks\": {\"response\": callback}}\n\n try:\n get(command.url, **kwargs)\n except exceptions.RequestException as e:\n command.error = \"Connection error \" + str(e)\n callback(None)\n\n\nclass DownloadService:\n def execute(self, command, response_handler):\n try:\n command.response = get(command.url, timeout=command.timeout)\n response_handler(command)\n except exceptions.RequestException as e:\n command.error = \"Connection error \" + 
str(e)\n","repo_name":"bluppfisk/coindicator","sub_path":"src/coin/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"77"} +{"seq_id":"39287263673","text":"import RPi.GPIO as GPIO\nimport time\nimport pyautogui as pag\nimport spiUtils as su\n\nGPIO.setmode(GPIO.BCM)\n\nclass Button:\n def __init__(self, pinNum, keyToPress):\n self.pin = pinNum\n self.key = keyToPress\n GPIO.setup(pinNum, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(pinNum, GPIO.RISING, callback=self.pushButton)\n \n # callback fired on the rising edge: press the key bound to this button\n def pushButton(self, channel):\n pag.press(self.key)\n \n# setup buttons\n # A button (pin 24)\naPin = 24\naButton = 'a'\n#GPIO.setup(aPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n # B button (pin 25)\nbPin = 25\nbButton = 'b'\n#GPIO.setup(bPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\na = Button(aPin, aButton)\nb = Button(bPin, bButton)\n\n# setup potentiometer\npot = su.readADC(channel=1)\n\nfor i in range(0, 1000):\n # fits potentiometer into range (-1 <-> 1)\n plusMinusValue = (su.readADC(channel=1)/511.5)-1\n print(plusMinusValue)\n time.sleep(.1)\n pag.move(200*plusMinusValue, 0)\n\n","repo_name":"benbrokaw/Capstone","sub_path":"capstone.py","file_name":"capstone.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22543791872","text":"from contextlib import contextmanager\nimport os\nimport sqlite3\nfrom typing import Generator\n\nimport pytest\n\nfrom app.cache.persistent_cache import PersistentCache\nfrom app.items import Item\n\n\n@contextmanager\ndef open_cursor() -> Generator[sqlite3.Cursor, None, None]:\n connection = sqlite3.connect(\"test.db\")\n cursor = connection.cursor()\n yield cursor\n connection.commit()\n cursor.close()\n connection.close()\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef verify_cache_removed():\n db_name = \"test.db\"\n if os.path.exists(db_name):\n os.remove(db_name)\n yield\n if os.path.exists(db_name):\n os.remove(db_name)\n\n\n@pytest.fixture(scope=\"function\")\ndef cache():\n db_name = \"test.db\"\n cache = PersistentCache(db_name=db_name)\n yield cache\n os.remove(db_name)\n assert not os.path.exists(db_name)\n\n\ndef test__persistent_cache__add(cache: PersistentCache[Item]):\n # Act\n item = Item(\"key\", \"title\", 1)\n with cache:\n cache.add(item.item_id, item)\n \n # Assert\n with open_cursor() as cursor:\n cursor.execute(\"SELECT * FROM items\")\n actual = cursor.fetchall()\n\n assert len(actual) == 1\n assert actual[0][0] == \"key\"\n\n\ndef test__persistent_cache__count(cache: PersistentCache[Item]):\n # Arrange\n item = Item(\"key\", \"value_1\", 1)\n with cache:\n cache.add(item.item_id, item)\n item.title = \"value_2\"\n with cache:\n cache.add(item.item_id, item)\n\n # Act\n with cache:\n actual = cache.count(\"key\")\n\n # Assert\n assert actual == 2\n\n\ndef test__persistent_cache__count_empty(cache: PersistentCache[Item]):\n # Act\n with cache:\n actual = cache.count(\"key\")\n\n # Assert\n assert actual == 0\n\n\ndef test__persistent_cache__head(cache: PersistentCache[Item]):\n # Arrange\n item = Item(\"key\", \"value_1\", 1)\n with cache:\n cache.add(item.item_id, item)\n item.title = \"value_2\"\n with cache:\n cache.add(item.item_id, item)\n\n # Act\n with cache:\n actual = cache.head(\"key\")\n\n # Assert\n assert isinstance(actual, 
Item)\n assert actual.item_id == \"key\"\n assert actual.title == \"value_2\"\n\n\ndef test__persistent_cache__head_empty(cache: PersistentCache[Item]):\n with pytest.raises(Exception):\n with cache:\n cache.head(\"key\")\n\n\ndef test__persistent_cache__list(cache: PersistentCache[Item]):\n # Arrange\n item = Item(\"key\", \"value_1\", 1)\n with cache:\n cache.add(item.item_id, item)\n item.title = \"value_2\"\n with cache:\n cache.add(item.item_id, item)\n\n # Act\n with cache:\n actual = list(cache.list())\n\n # Assert\n assert len(actual) == 2\n assert set([item.title for item in actual]) == {\"value_1\", \"value_2\"}\n","repo_name":"python-spokane/journey-to-the-pythonic-peak","sub_path":"tests/cache/persistent_cache_test.py","file_name":"persistent_cache_test.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30823622590","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 7 14:15:21 2016\n\n@author: pikashoes\n\"\"\"\nimport random\n\ncountries = ['China', 'South Korea', 'United States', 'Brazil', 'Argentina', 'Russia',\n 'France', 'Denmark', 'Philippines', 'Taiwan', 'Norway', 'Australia',\n 'Mexico', 'Canada', 'Germany', 'Vietnam', 'Poland', 'Hong Kong', 'Spain', 'Sweden',\n 'Netherlands', 'United Kingdom', 'Singapore', 'Turkey', 'Finland', 'Lithuania', 'Estonia',\n 'Belgium', 'Thailand', 'Armenia', 'Ukraine', 'Greece', 'Chile', 'Czech Republic', 'Japan',\n 'Hungary', 'Uruguay', 'Peru', 'Israel', 'Colombia', 'Portugal', 'Kazakhstan', 'Indonesia',\n 'Afghanistan', 'Croatia', 'South Africa', 'Costa Rica', 'Iraq', 'Slovenia', 'Serbia',\n 'Panama', 'Macao', 'Italy', 'Ecuador', 'Malta', 'Iceland', 'Luxembourg']\n\n\ndef main():\n final_file = \"\"\n teamlist = []\n with open('PlayerBITeam.txt', 'r') as file:\n for line in file:\n random_country = random.randint(0, 56)\n line = line.replace(\"\\n\", \"\")\n new_line = line.split('|')\n if new_line[1] not in teamlist:\n final_file += new_line[1] + \"|\" + countries[random_country] + \"\\n\"\n teamlist.append(new_line[1])\n \n a = open('CurrentTeam.txt', 'w')\n a.write(final_file)\n a.close()\n\nmain()","repo_name":"pikashoes/mpcs-databases","sub_path":"Python/CurrentTeam.py","file_name":"CurrentTeam.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"71223059130","text":"def solution(clothes):\n dic = dict()\n for cloth in clothes: # initialize\n dic[cloth[1]] = 0\n \n for cloth in clothes:\n dic[cloth[1]] += 1\n print(dic)\n \n \n \n ls = list(dic.values())\n print(ls)\n \n answer = 1\n for num in ls:\n answer *= (num+1)\n return answer-1\n\n'''\n1. (number of headgear + 1): we add 1 because the headgear may also not be worn.\n2. (number of eyewear + 1): we add 1 because the eyewear may also not be worn.\n3. 
The two counts are independent of each other, so multiply the numbers from 1 and 2 and subtract 1 (since the case of wearing nothing at all does not exist).\n'''\n","repo_name":"confettimimy/Python-for-coding-test","sub_path":"• 프로그래머스/해시/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"15471964677","text":"# -*- coding: UTF-8 -*-\n\n__author__ = 'Bruce Frank Wong'\n\n\nfrom typing import List, Optional, Union\nimport csv\nfrom pathlib import Path\nfrom enum import Enum\nimport datetime as dt\nfrom contextlib import closing\n\nimport pandas as pd\nfrom tqsdk import TqApi, TqAuth, TqSim\nfrom tqsdk.tools import DataDownloader\n\nfrom ...utility import CONFIGS, PACKAGE_PATH\n\n\nclass Symbol:\n def __init__(self, exchange: str, product: str, delivery: str):\n self.exchange = exchange\n self.product = product\n self.delivery = delivery\n\n\nclass Period(Enum):\n Tick = 'Tick'\n Second = 'Second'\n Minute = 'Minute'\n Hour = 'Hour'\n Day = 'Day'\n Week = 'Week'\n Month = 'Month'\n Year = 'Year'\n\n def to_second(self) -> int:\n if self.value == 'Tick':\n return 0\n elif self.value == 'Second':\n return 1\n elif self.value == 'Minute':\n return 60\n elif self.value == 'Hour':\n return 60 * 60\n elif self.value == 'Day':\n return 60 * 60 * 24\n elif self.value == 'Week':\n return 60 * 60 * 24 * 5\n elif self.value == 'Month':\n return 60 * 60 * 24 * 5 * 4\n elif self.value == 'Year':\n return 60 * 60 * 24 * 5 * 4 * 12\n\n def to_english(self) -> str:\n if self.value == 'Tick':\n return 'Tick'\n elif self.value == 'Second':\n return 'Second'\n elif self.value == 'Minute':\n return 'Minute'\n elif self.value == 'Hour':\n return 'Hour'\n elif self.value == 'Day':\n return 'Day'\n elif self.value == 'Week':\n return 'Week'\n elif self.value == 'Month':\n return 'Month'\n elif self.value == 'Year':\n return 'Year'\n\n def to_chinese(self) -> str:\n if self.value == 'Tick':\n return 'Tick'\n elif self.value == 'Second':\n return '秒'\n elif self.value == 'Minute':\n return '分钟'\n elif self.value == 'Hour':\n return '小时'\n elif self.value == 'Day':\n return '日'\n elif self.value == 'Week':\n return '周'\n elif self.value == 'Month':\n return '月'\n elif self.value == 'Year':\n return '年'\n\n def __str__(self, chinese: bool = False):\n if chinese:\n return self.to_chinese()\n else:\n return self.to_english()\n\n\nclass DownloadRequest:\n symbol: str\n start: Union[dt.datetime, dt.date]\n end: Union[dt.datetime, dt.date]\n period: Period\n\n def __init__(self,\n symbol: str,\n period: Period,\n start: Union[dt.datetime, dt.date],\n end: Optional[Union[dt.datetime, dt.date]] = None\n ):\n self.symbol = symbol\n self.period = period\n self.start = start\n if end:\n if isinstance(end, dt.date):\n self.end = end if end < dt.date.today() else dt.date.today()\n else:\n self.end = end if end < dt.datetime.now() else dt.datetime.now()\n else:\n if isinstance(start, dt.date):\n self.end = dt.date.today()\n else:\n self.end = dt.datetime.now()\n\n\ndef tq_download(download_request_list: List[DownloadRequest]):\n # TqSDK api.\n tq_api: TqApi = TqApi(\n auth=TqAuth(\n CONFIGS['TQ']['account'],\n CONFIGS['TQ']['password']\n )\n )\n\n # Download path; make sure it exists.\n download_path: Path = PACKAGE_PATH.joinpath('data_downloaded')\n if not download_path.exists():\n download_path.mkdir()\n\n # csv header.\n bar_column_list: List[str] = [\n 'open', 'high', 'low', 'close', 'volume', 'open_oi', 'close_oi'\n ]\n tick_column_list: List[str] = [\n 'last_price', 'highest', 'lowest',\n 
'bid_price1', 'bid_volume1', 'ask_price1', 'ask_volume1',\n 'volume', 'amount', 'open_interest'\n ]\n\n # Do the download.\n task_name: str\n file_path: Path\n task: DataDownloader\n with closing(tq_api):\n download_request: DownloadRequest\n for download_request in download_request_list:\n task_name = download_request.symbol\n file_path = download_path.joinpath(\n f'{download_request.symbol}_{download_request.period.to_english()}.csv'\n )\n task = DataDownloader(\n tq_api,\n symbol_list=download_request.symbol,\n dur_sec=download_request.period.to_second(),\n start_dt=download_request.start,\n end_dt=download_request.end,\n csv_file_name=str(file_path)\n )\n\n while not task.is_finished():\n tq_api.wait_update()\n print(\n f'正在下载 [{task_name}] 的 {download_request.period.to_chinese()} 数据,'\n f'已完成: {task.get_progress():>7.3f}%。'\n )\n\n # 处理下载好的 csv 文件的 header, 也就是 pandas.DataFrame 的 column.\n if task.is_finished():\n df = pd.read_csv(file_path)\n if download_request.period.to_second() == Period.Tick:\n column_list = tick_column_list\n else:\n column_list = bar_column_list\n for column in column_list:\n column_x = ''.join([download_request.symbol, '.', column])\n if column_x in df.columns:\n df.rename(columns={column_x: column}, inplace=True)\n df.to_csv(file_path, index=False)\n","repo_name":"BruceFrankWong/Research","sub_path":"InvestmentResearch/collector/tq/tq.py","file_name":"tq.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17464897023","text":"from ax.core.parameter import (\n ChoiceParameter,\n FixedParameter,\n ParameterType,\n RangeParameter,\n)\nfrom ax.core.parameter_constraint import (\n ComparisonOp,\n OrderConstraint,\n ParameterConstraint,\n SumConstraint,\n)\nfrom ax.utils.common.testutils import TestCase\n\n\nclass ParameterConstraintTest(TestCase):\n def setUp(self) -> None:\n self.constraint = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -3.0}, bound=6.0\n )\n self.constraint_repr = \"ParameterConstraint(2.0*x + -3.0*y <= 6.0)\"\n\n def test_Eq(self) -> None:\n constraint1 = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -3.0}, bound=6.0\n )\n constraint2 = ParameterConstraint(\n constraint_dict={\"y\": -3.0, \"x\": 2.0}, bound=6.0\n )\n self.assertEqual(constraint1, constraint2)\n\n constraint3 = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -5.0}, bound=6.0\n )\n self.assertNotEqual(constraint1, constraint3)\n\n def test_Properties(self) -> None:\n self.assertEqual(self.constraint.constraint_dict[\"x\"], 2.0)\n self.assertEqual(self.constraint.bound, 6.0)\n\n def test_Repr(self) -> None:\n self.assertEqual(str(self.constraint), self.constraint_repr)\n\n def test_Validate(self) -> None:\n parameters = {\"x\": 4, \"z\": 3}\n with self.assertRaises(ValueError):\n # pyre-fixme[6]: For 1st param expected `Dict[str, Union[float, int]]`\n # but got `Dict[str, int]`.\n self.constraint.check(parameters)\n\n # check slack constraint\n parameters = {\"x\": 4, \"y\": 1}\n # pyre-fixme[6]: For 1st param expected `Dict[str, Union[float, int]]` but\n # got `Dict[str, int]`.\n self.assertTrue(self.constraint.check(parameters))\n\n # check tight constraint (within numerical tolerance)\n parameters = {\"x\": 4, \"y\": (2 - 0.5e-8) / 3}\n self.assertTrue(self.constraint.check(parameters))\n\n # check violated constraint\n parameters = {\"x\": 4, \"y\": (2 - 0.5e-6) / 3}\n self.assertFalse(self.constraint.check(parameters))\n\n def 
test_Clone(self) -> None:\n constraint_clone = self.constraint.clone()\n self.assertEqual(self.constraint.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint.bound, constraint_clone.bound)\n\n def test_CloneWithTransformedParameters(self) -> None:\n constraint_clone = self.constraint.clone_with_transformed_parameters(\n transformed_parameters={}\n )\n self.assertEqual(self.constraint.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint.bound, constraint_clone.bound)\n\n def test_Sortable(self) -> None:\n constraint1 = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -3.0}, bound=1.0\n )\n constraint2 = ParameterConstraint(\n constraint_dict={\"y\": -3.0, \"x\": 2.0}, bound=6.0\n )\n self.assertTrue(constraint1 < constraint2)\n\n\nclass OrderConstraintTest(TestCase):\n def setUp(self) -> None:\n self.x = RangeParameter(\"x\", ParameterType.INT, lower=0, upper=1)\n self.y = RangeParameter(\"y\", ParameterType.INT, lower=0, upper=1)\n self.constraint = OrderConstraint(\n lower_parameter=self.x, upper_parameter=self.y\n )\n self.constraint_repr = \"OrderConstraint(x <= y)\"\n\n def test_Properties(self) -> None:\n self.assertEqual(self.constraint.lower_parameter.name, \"x\")\n self.assertEqual(self.constraint.upper_parameter.name, \"y\")\n\n def test_Repr(self) -> None:\n self.assertEqual(str(self.constraint), self.constraint_repr)\n\n def test_Validate(self) -> None:\n self.assertTrue(self.constraint.check({\"x\": 0, \"y\": 1}))\n self.assertTrue(self.constraint.check({\"x\": 1, \"y\": 1}))\n self.assertFalse(self.constraint.check({\"x\": 1, \"y\": 0}))\n\n def test_Clone(self) -> None:\n constraint_clone = self.constraint.clone()\n self.assertEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n constraint_clone._lower_parameter = self.y\n self.assertNotEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n def test_CloneWithTransformedParameters(self) -> None:\n constraint_clone = self.constraint.clone_with_transformed_parameters(\n transformed_parameters={p.name: p for p in self.constraint.parameters}\n )\n self.assertEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n constraint_clone._lower_parameter = self.y\n self.assertNotEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n def test_InvalidSetup(self) -> None:\n z = FixedParameter(\"z\", ParameterType.INT, 0)\n with self.assertRaises(ValueError):\n self.constraint = OrderConstraint(lower_parameter=self.x, upper_parameter=z)\n\n z = ChoiceParameter(\"z\", ParameterType.STRING, [\"a\", \"b\", \"c\"])\n with self.assertRaises(ValueError):\n self.constraint = OrderConstraint(lower_parameter=self.x, upper_parameter=z)\n\n\nclass SumConstraintTest(TestCase):\n def setUp(self) -> None:\n self.x = RangeParameter(\"x\", ParameterType.INT, lower=-5, upper=5)\n self.y = RangeParameter(\"y\", ParameterType.INT, lower=-5, upper=5)\n self.constraint1 = SumConstraint(\n parameters=[self.x, self.y], is_upper_bound=True, bound=5\n )\n self.constraint2 = SumConstraint(\n parameters=[self.x, self.y], is_upper_bound=False, bound=-5\n )\n\n self.constraint_repr1 = \"SumConstraint(x + y <= 5.0)\"\n self.constraint_repr2 = \"SumConstraint(x + y >= -5.0)\"\n\n def test_BadConstruct(self) -> None:\n with self.assertRaises(ValueError):\n SumConstraint(parameters=[self.x, self.x], is_upper_bound=False, bound=-5.0)\n z = 
ChoiceParameter(\"z\", ParameterType.STRING, [\"a\", \"b\", \"c\"])\n with self.assertRaises(ValueError):\n # pyre-fixme[16]: `SumConstraintTest` has no attribute `constraint`.\n self.constraint = SumConstraint(\n parameters=[self.x, z], is_upper_bound=False, bound=-5.0\n )\n\n def test_Properties(self) -> None:\n self.assertEqual(self.constraint1.op, ComparisonOp.LEQ)\n self.assertTrue(self.constraint1._is_upper_bound)\n\n self.assertEqual(self.constraint2.op, ComparisonOp.GEQ)\n self.assertFalse(self.constraint2._is_upper_bound)\n\n def test_Repr(self) -> None:\n self.assertEqual(str(self.constraint1), self.constraint_repr1)\n self.assertEqual(str(self.constraint2), self.constraint_repr2)\n\n def test_Validate(self) -> None:\n self.assertTrue(self.constraint1.check({\"x\": 1, \"y\": 4}))\n self.assertTrue(self.constraint1.check({\"x\": 4, \"y\": 1}))\n self.assertFalse(self.constraint1.check({\"x\": 1, \"y\": 5}))\n\n self.assertTrue(self.constraint2.check({\"x\": -4, \"y\": -1}))\n self.assertTrue(self.constraint2.check({\"x\": -1, \"y\": -4}))\n self.assertFalse(self.constraint2.check({\"x\": -5, \"y\": -1}))\n\n def test_Clone(self) -> None:\n constraint_clone = self.constraint1.clone()\n self.assertEqual(self.constraint1.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint1.bound, constraint_clone.bound)\n\n constraint_clone_2 = self.constraint2.clone()\n self.assertEqual(self.constraint2.bound, constraint_clone_2.bound)\n\n def test_CloneWithTransformedParameters(self) -> None:\n constraint_clone = self.constraint1.clone_with_transformed_parameters(\n transformed_parameters={p.name: p for p in self.constraint1.parameters}\n )\n self.assertEqual(self.constraint1.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint1.bound, constraint_clone.bound)\n","repo_name":"facebook/Ax","sub_path":"ax/core/tests/test_parameter_constraint.py","file_name":"test_parameter_constraint.py","file_ext":"py","file_size_in_byte":8161,"program_lang":"python","lang":"en","doc_type":"code","stars":2182,"dataset":"github-code","pt":"77"} +{"seq_id":"41415231281","text":"from os import mkdir, remove, rmdir, listdir\r\nfrom tkinter import Tk, Toplevel, ttk, Label, Entry, Button\r\nfrom tkinter.filedialog import asksaveasfilename\r\nfrom subprocess import run, call\r\nfrom os import startfile, getcwd\r\nfrom geopy.geocoders import Nominatim\r\nfrom typing import Union\r\n\r\nclass Snapchat:\r\n\r\n def __init__(self, parent: Union[Tk, Toplevel], long: str = None, lat: str = None, city: str = None):\r\n self.parent = parent\r\n self.long = long\r\n self.lat = lat\r\n self.city = city\r\n self.parent.iconify()\r\n try:\r\n mkdir(\"temp\")\r\n except FileExistsError:\r\n print(\"Folder already exists, possible messy exit previously\")\r\n self.snapwindow = self.genWindow()\r\n self.genWidgets()\r\n\r\n # Generates the window\r\n def genWindow(self) -> Toplevel:\r\n snapwindow = Toplevel(self.parent)\r\n snapwindow.title(\"Snapchat\")\r\n snapwindow.resizable(False, False)\r\n snapwindow.geometry(\"+%d+%d\" % (self.parent.winfo_x(), self.parent.winfo_y()))\r\n snapwindow.protocol(\"WM_DELETE_WINDOW\", lambda: self.closewin())\r\n return snapwindow\r\n\r\n # Generates the widgets to be displayed on the window\r\n def genWidgets(self) -> None:\r\n # Labels\r\n latLabel = Label(self.snapwindow, text=\"Latitude: \")\r\n longLabel = Label(self.snapwindow, text=\"Longitude: \")\r\n cityLabel = Label(self.snapwindow, 
text=\"City: \")\r\n radiusLabel = Label(self.snapwindow, text=\"Radius (meters): \")\r\n\r\n # Entries\r\n latEntry = Entry(self.snapwindow, width=20)\r\n longEntry = Entry(self.snapwindow, width=20)\r\n cityEntry = Entry(self.snapwindow, width=20)\r\n radiusEntry = Entry(self.snapwindow, width=10)\r\n\r\n radiusEntry.insert(0, \"10000\")\r\n\r\n # Button\r\n SearchButton = Button(self.snapwindow, text=\"Search\", command=lambda: self.verifyInput(latEntry.get(), longEntry.get(), cityEntry.get(), radiusEntry.get()))\r\n SaveButton = Button(self.snapwindow, text=\"Save\", command=lambda: self.saveFile())\r\n\r\n # Autofill\r\n if self.long:\r\n longEntry.insert(0, self.long)\r\n if self.lat:\r\n latEntry.insert(0, self.lat)\r\n if self.city:\r\n cityEntry.insert(0, self.city)\r\n\r\n # Treeview\r\n self.details = ttk.Treeview(self.snapwindow, show=\"headings\", height=\"6\")\r\n\r\n self.details['columns'] = (\"Name\", \"Type\")\r\n self.details.column(\"#0\", width=0)\r\n self.details.column(\"Name\", width=350)\r\n self.details.column(\"Type\", width=150, minwidth=80)\r\n\r\n self.details.heading(\"#0\", text=\"\")\r\n self.details.heading(\"Name\", text=\"Name\")\r\n self.details.heading(\"Type\", text=\"Type\")\r\n\r\n scroll = ttk.Scrollbar(self.snapwindow, orient=\"vertical\", command=self.details.yview)\r\n self.details.config(yscrollcommand=scroll.set)\r\n self.details.bind(\"\", self.openFile)\r\n\r\n # Grid Layout\r\n latLabel.grid(row=0, column=0, sticky=\"E\")\r\n latEntry.grid(row=0, column=1, padx=(0, 5))\r\n longLabel.grid(row=0, column=2, sticky=\"E\")\r\n longEntry.grid(row=0, column=3, padx=(0, 5))\r\n radiusLabel.grid(row=1, column=2, sticky=\"E\")\r\n radiusEntry.grid(row=1, column=3, padx=(0, 5))\r\n cityLabel.grid(row=1, column=0, sticky=\"E\")\r\n cityEntry.grid(row=1, column=1, padx=(0, 5))\r\n\r\n SearchButton.grid(row=2, column=2, columnspan=1, sticky=\"NSEW\", pady=10)\r\n SaveButton.grid(row=4, column=2, pady=(0, 10))\r\n\r\n self.details.grid(row=3, column=0, columnspan=4, padx=(5, 0), pady=5)\r\n scroll.grid(row=3, column=4, padx=(0, 5), sticky=\"NS\")\r\n\r\n # Opens the images displayed in the treeview when double clicked\r\n def openFile(self, event) -> None:\r\n filename = getcwd() + \"/temp/\" + self.details.item(self.details.focus())['values'][0]\r\n try:\r\n startfile(filename)\r\n except AttributeError:\r\n call(['open', filename])\r\n\r\n # Saves the file to a directory of their choice from the temp folder\r\n def saveFile(self) -> None:\r\n if self.details.item(self.details.focus())['values'][1] == \"Image\":\r\n file = asksaveasfilename(initialfile=self.details.item(self.details.focus())['values'][0].rsplit(\".\", 1)[0], filetypes=[(\"JPG image\", \"*.jpg\")]) + \".jpg\"\r\n else:\r\n file = asksaveasfilename(initialfile=self.details.item(self.details.focus())['values'][0].rsplit(\".\", 1)[0], filetypes=[(\"MPEG video\", \"*.mp4\")]) + \".mp4\"\r\n if len(file) > 4:\r\n with open(file, \"wb+\") as f:\r\n with open(getcwd() + \"/temp/\" + self.details.item(self.details.focus())['values'][0], \"rb\") as f2:\r\n f.write(f2.read())\r\n\r\n # Checks to see if the range is an int, and gets the lat, long of the city if chosen\r\n def verifyInput(self, lat: str, long: str, city: str, range: str) -> None:\r\n try:\r\n rangeInt = int(range)\r\n except ValueError:\r\n print(\"Error, radius isn't a number. 
Resorting to default!\")\r\n rangeInt = 10000\r\n if lat == \"\":\r\n geo = Nominatim(user_agent=\"CTI Toolkit\")\r\n location = geo.geocode(city)\r\n lat = location.latitude\r\n long = location.longitude\r\n\r\n coords: str = str(lat) + \",\" + str(long)\r\n\r\n self.searchSnap(coords, rangeInt)\r\n\r\n # Clears the temp folder, and then runs the snapmap_archiver python module\r\n def searchSnap(self, coords: str, range: int) -> None:\r\n # Cleanup\r\n self.details.delete(*self.details.get_children())\r\n for files in listdir(\"temp\"):\r\n remove(\"temp/\" + files)\r\n\r\n # Runs the command (Will freeze the program)\r\n run('python -m snapmap_archiver -o temp -l=\\\"{}\\\" -r {}'.format(coords, range))\r\n\r\n # Lists the output\r\n for files in listdir(\"temp\"):\r\n if \"mp4\" in files.rsplit(\".\")[1]:\r\n self.details.insert(\"\", 'end', values=(files, \"Video\"))\r\n else:\r\n self.details.insert(\"\", 'end', values=(files, \"Image\"))\r\n\r\n # Closes the window\r\n def closewin(self) -> None:\r\n for file in listdir(\"temp\"):\r\n remove(\"temp/\" + file)\r\n rmdir(\"temp\")\r\n self.parent.deiconify()\r\n self.snapwindow.destroy()\r\n","repo_name":"AlanTheBlank/CTI-Toolkit","sub_path":"snapchat.py","file_name":"snapchat.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"34264409656","text":"# -*- coding: utf-8 -*-\n#tfidf(21)\n\n# -*- coding: utf-8 -*-\n\nimport jieba.posseg as pseg\nimport pandas as pd\nfrom gensim import models, similarities,corpora\nimport codecs\n\n# 构建停用词表\nstop_words = './stop_words.txt'\nstopwords = codecs.open(stop_words,'r',encoding='utf8').readlines()\nstopwords = [ w.strip() for w in stopwords ]\nstop_flag = ['x', 'c', 'u','d', 'p', 't', 'uj', 'm', 'f', 'r']\ndef tokenization(title):\n result = []\n words = pseg.cut(title)\n for word, flag in words:\n if flag not in stop_flag and word not in stopwords:\n result.append(word)\n return result\n\ndef train_text():\n # 训练文本数据\n all_doc = []\n datas = pd.read_csv(\"train_data.csv\")\n titles = datas['title']\n for title in titles:\n all_doc.append(title)\n\n\n # 对目标文档进行分词\n print(\"对目标文档进行分词\")\n all_doc_list = []\n for doc in all_doc:\n doc_list = tokenization(doc)\n all_doc_list.append(doc_list)\n\n\n\n # 测试文档数据\n print(\"测试文档数据\")\n test_doc = []\n test_datas = pd.read_csv(\"test_data.csv\", encoding=\"gbk\")\n test_titles = test_datas[\"title\"]\n for title in test_titles:\n test_doc.append(title)\n\n\n\n # 测试文档进行分词\n test_doc_list = []\n for doc in test_doc:\n doc_list = tokenization(doc)\n test_doc_list.append(doc_list)\n\n # 制作语料库\n print(\"制作语料库\")\n\n dictionary = corpora.Dictionary(all_doc_list)\n dictionary.keys()\n dictionary.token2id\n corpus = [dictionary.doc2bow(doc) for doc in all_doc_list] #(0,1)(1,1)\n tfidf = models.TfidfModel(corpus)\n results = []\n for doc_test_list in test_doc_list:\n doc_test_vec = dictionary.doc2bow(doc_test_list)\n index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=len(dictionary.keys()))\n sim = index[tfidf[doc_test_vec]]\n similiar_sorted = sorted(enumerate(sim), key=lambda item: -item[1])[:21]\n print(similiar_sorted)\n indexs = [str(item[0]+1) for item in similiar_sorted]\n print(indexs)\n results.append(\" \".join(indexs))\n\n with open(\"results.txt\", \"w\") as f:\n for item in results:\n item = item.strip().split()\n for i in range(0, 21):\n f.write(item[0] + \"\\t\" + item[i] + \"\\n\")\n\nif __name__ == \"__main__\":\n 
train_text()\n with open(\"results.txt\", \"r\") as f, open(\"submisson2.txt\", \"w\") as wf:\n wf.write(\"source_id\" + \"\\t\" + \"target_id\" + \"\\n\")\n datas = f.readlines()\n for data in datas:\n data = data.strip().split(\"\\t\")\n wf.write(data[0] + \"\\t\" + data[1] + \"\\n\")\n\n\n\n","repo_name":"WangYanZ/NewsRecommendBaseline","sub_path":"baseline_2.py","file_name":"baseline_2.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"18775026506","text":"def fib():\n count = int(input(\"Enter how many Fibonacci numbers you want to print: \"))\n print(\"You have entered:\", count)\n i = 1\n if count == 0:\n fib = []\n elif count == 1:\n fib = [1]\n elif count == 2:\n fib = [1,1]\n elif count > 2:\n fib = [1,1]\n while i < count - 1:\n fib.append((fib[i] + fib[i-1]))\n i += 1\n return fib\nprint(\"Your Fibonacci numbers are:\", fib())","repo_name":"Peuapeu2019/Selftought","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39112841452","text":"from tracks.models import Track\nfrom django.shortcuts import get_object_or_404\nfrom django.core.exceptions import PermissionDenied\n\nclass TrackMixin(object):\n '''\n Load a track and check user\n can access it\n '''\n model = Track\n context_object_name = 'track'\n\n def get_object(self, check_ownership=False):\n # Load requested track\n self.track = get_object_or_404(Track, pk=self.kwargs['track_id'])\n\n # Check access rights to the track\n track_user = self.track.session.day.week.user\n if check_ownership and track_user != self.request.user:\n raise PermissionDenied\n if 'tracks' not in track_user.get_privacy_rights(self.request.user):\n raise PermissionDenied\n\n return self.track\n","repo_name":"La0/runreport","sub_path":"src/tracks/views/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"12236653327","text":"#!/usr/bin/python\n###\n# 11B.\n# [chrosta@toolbox:~/Github/advent-of-code@chrosta/advent-of-code/2022]$ cat ./data/11.text | ./11B.py\n###\nimport sys\nfrom functools import reduce\n\n###\n# Product of all monkeys' test divisors; worry levels are reduced modulo this value to keep them bounded.\n###\nALL_DIVISORS_PRODUCT = 1\n\nclass Monkey:\n def __init__(self, number, items, oper, test):\n global ALL_DIVISORS_PRODUCT\n self.__number = number\n self.__items = items\n self.__oper = oper\n self.__test = test \n self.__monkeys = []\n self.__count = 0\n ALL_DIVISORS_PRODUCT *= self.__test\n\n def __repr__(self):\n return str([self.__number, self.__items, self.__test, self.__monkeys[0].number(), self.__monkeys[1].number(), self.__count])\n\n def __str__(self):\n return self.__repr__()\n \n def count(self):\n return self.__count\n\n def number(self):\n return self.__number\n\n def turn(self):\n while len(self.__items) > 0:\n self.__count += 1\n old = self.__items.pop(0)\n new = eval(self.__oper.split(\" = \")[1])\n #--\n new = new % ALL_DIVISORS_PRODUCT\n #--\n if new % self.__test == 0:\n self.__monkeys[0].append_throwed_item(new)\n else:\n self.__monkeys[1].append_throwed_item(new)\n \n def append_throwed_item(self, i):\n self.__items.append(i)\n\n def bind_to_monkeys(self, m):\n self.__monkeys = m\n\ndata = {}\nlines = [l.strip().split(':') for l in sys.stdin.readlines()]\nfor l in 
lines:\n if \"Monkey\" in l[0]:\n number = int(l[0].split(' ')[1])\n continue\n if \"Starting items\" in l[0]:\n items = [int(i.strip()) for i in l[1].split(\", \")]\n continue\n if \"Operation\" in l[0]:\n oper = l[1].strip()\n continue\n if \"Test\" in l[0]:\n test = int(l[1].split(' ')[3])\n continue\n if \"If true\" in l[0]:\n t = int(l[1].split(' ')[4])\n continue\n if \"If false\" in l[0]:\n f = int(l[1].split(' ')[4])\n continue\n if len(l[0]) == 0:\n data[number] = [items, oper, test, t, f]\n\n[print(\"I:\", k, d[0]) for k, d in data.items()]\nprint(\"---[\", 0, \"]---\")\nmonkeys = [Monkey(k, d[0], d[1], d[2]) for k, d in data.items()]\nfor m in monkeys:\n b = [n for n in data[m.number()][3:5]]\n m.bind_to_monkeys([monkeys[b[0]], monkeys[b[1]]])\n print(\"A:\", m)\n\nfor r in range(0, 10000):\n print(\"---[\", r + 1, \"]---\")\n for m in monkeys:\n print(\"A:\", m)\n m.turn()\n print(\"B:\", m)\n\nprint(\"---[ R ]---\")\nr = [m.count() for m in monkeys]\nr.sort()\nr = reduce(lambda x, y: x*y, r[-2:])\nprint(r)\n###\n# 25712998901\n###\n","repo_name":"chrosta/advent-of-code","sub_path":"2022/11B.py","file_name":"11B.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72227828730","text":"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\nimport sys\nsys.path.insert(0, '.')\nimport os\nimport os.path as osp\nimport random\nimport logging\nimport time\nimport argparse\nimport numpy as np\nfrom tabulate import tabulate\n\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.utils.data import DataLoader\n\nfrom networks import model_factory\nfrom configs import cfg_factory\nfrom dataload.rexroth_cv2 import get_data_loader\nfrom evaluate.evaluate import evaluate\nfrom ohem_ce_loss import OhemCELoss\nfrom lr_scheduler import WarmupPolyLrScheduler\nfrom utils.meters import TimeMeter, AvgMeter\nfrom utils.logger import setup_logger, print_log_msg, print_log_msg_withoutaux\nfrom torch.utils.tensorboard import SummaryWriter\n\n# apex\nhas_apex = True\ntry:\n from apex import amp, parallel\nexcept ImportError:\n has_apex = False\n\n\n## fix all random seeds\ntorch.manual_seed(123)\ntorch.cuda.manual_seed(123)\nnp.random.seed(123)\nrandom.seed(123)\ntorch.backends.cudnn.deterministic = True\n# torch.backends.cudnn.benchmark = True\n# torch.multiprocessing.set_sharing_strategy('file_system')\n\n\ndef parse_args():\n parse = argparse.ArgumentParser()\n parse.add_argument('--local_rank', dest='local_rank', type=int, default=-1,)\n parse.add_argument('--port', dest='port', type=int, default=44554,)\n parse.add_argument('--model', dest='model', type=str, default='bisenetv1')\n parse.add_argument('--finetune-from', type=str, default=None,)\n return parse.parse_args()\n\nargs = parse_args()\ncfg = cfg_factory[args.model]\nwriter = SummaryWriter(log_dir=cfg.logpath)\n\ndef set_model():\n net = model_factory[cfg.model_type](n_classes=cfg.categories, aux_output=cfg.aux_output, export=False)\n if not args.finetune_from is None:\n net.load_state_dict(torch.load(args.finetune_from, map_location='cpu'))\n if cfg.use_sync_bn: net = set_syncbn(net)\n net.cuda()\n net.train()\n criteria_pre = OhemCELoss(0.7)\n if cfg.aux_output:\n criteria_aux = [OhemCELoss(0.7) for _ in range(cfg.num_aux_heads)]\n return net, criteria_pre, criteria_aux\n else:\n return net, criteria_pre\n\ndef set_syncbn(net):\n if has_apex:\n net = parallel.convert_syncbn_model(net)\n else:\n net = 
nn.SyncBatchNorm.convert_sync_batchnorm(net)\n return net\n\n\ndef set_optimizer(model):\n if hasattr(model, 'get_params'):\n wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = model.get_params()\n params_list = [\n {'params': wd_params, },\n {'params': nowd_params, 'weight_decay': 0},\n {'params': lr_mul_wd_params, 'lr': cfg.lr_start * cfg.lr_multiplier},\n {'params': lr_mul_nowd_params, 'weight_decay': 0, 'lr': cfg.lr_start * cfg.lr_multiplier},\n ]\n else:\n wd_params, non_wd_params = [], []\n for name, param in model.named_parameters():\n if param.dim() == 1:\n non_wd_params.append(param)\n elif param.dim() == 2 or param.dim() == 4:\n wd_params.append(param)\n params_list = [\n {'params': wd_params, },\n {'params': non_wd_params, 'weight_decay': 0},\n ]\n optim = torch.optim.SGD(\n params_list,\n lr=cfg.lr_start,\n momentum=0.9,\n weight_decay=cfg.weight_decay,\n )\n return optim\n\n\ndef set_model_dist(net):\n if has_apex:\n net = parallel.DistributedDataParallel(net, delay_allreduce=True, find_unused_parameters=True)\n else:\n local_rank = dist.get_rank()\n net = nn.parallel.DistributedDataParallel(\n net,\n device_ids=[local_rank, ],\n output_device=local_rank, find_unused_parameters=True)\n return net\n\n\ndef set_meters():\n time_meter = TimeMeter(cfg.max_iter)\n loss_meter = AvgMeter('loss')\n loss_pre_meter = AvgMeter('loss_prem')\n if cfg.aux_output:\n loss_aux_meters = [AvgMeter('loss_aux{}'.format(i))\n for i in range(cfg.num_aux_heads)]\n return time_meter, loss_meter, loss_pre_meter, loss_aux_meters\n else:\n return time_meter, loss_meter, loss_pre_meter\n\n\ndef train():\n logger = logging.getLogger()\n is_dist = dist.is_initialized()\n\n ## dataset\n dl_train = get_data_loader(\n cfg.im_root, cfg.train_im_anns,\n cfg.ims_per_gpu, cfg.scales, cfg.cropsize,\n cfg.max_iter, mode='train', distributed=is_dist, n_cats=cfg.categories)\n\n ## model\n if cfg.aux_output:\n net, criteria_pre, criteria_aux = set_model()\n else:\n net, criteria_pre = set_model()\n\n ## optimizer\n optim = set_optimizer(net)\n\n ## fp16\n if has_apex:\n opt_level = 'O1' if cfg.use_fp16 else 'O0'\n net, optim = amp.initialize(net, optim, opt_level=opt_level)\n\n ## ddp training\n net = set_model_dist(net)\n\n ## meters\n if cfg.aux_output:\n time_meter, loss_meter, loss_pre_meter, loss_aux_meters = set_meters()\n else:\n time_meter, loss_meter, loss_pre_meter = set_meters()\n ## lr scheduler\n lr_schdr = WarmupPolyLrScheduler(optim, power=0.9,\n max_iter=cfg.max_iter, warmup_iter=cfg.warmup_iters,\n warmup_ratio=0.1, warmup='exp', last_epoch=-1,)\n\n ## train loop\n iteration = 0\n for it, (im, lb) in enumerate(dl_train):\n im = im.cuda()\n lb = lb.cuda()\n\n lb = torch.squeeze(lb, 1)\n\n optim.zero_grad()\n if cfg.aux_output:\n logits, *logits_aux = net(im)\n else:\n logits = net(im)\n loss_pre = criteria_pre(logits, lb)\n loss = loss_pre\n if cfg.aux_output:\n loss_aux = [crit(lgt, lb) for crit, lgt in zip(criteria_aux, logits_aux)]\n loss = loss_pre + sum(loss_aux)\n if has_apex:\n with amp.scale_loss(loss, optim) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optim.step()\n torch.cuda.synchronize()\n lr_schdr.step()\n\n writer.add_scalar(\"loss\", loss_pre, it)\n\n time_meter.update()\n loss_meter.update(loss.item())\n iteration = it\n if cfg.aux_output:\n loss_pre_meter.update(loss_pre.item())\n _ = [mter.update(lss.item()) for mter, lss in zip(loss_aux_meters, loss_aux)]\n\n\n ## print training log message\n if (it + 1) % 100 == 0:\n lr = lr_schdr.get_lr()\n lr = 
sum(lr) / len(lr)\n writer.add_scalar(\"learning_rate\", lr, it)\n if cfg.aux_output:\n print_log_msg(\n it, cfg.max_iter, lr, time_meter, loss_meter,\n loss_pre_meter, loss_aux_meters)\n else:\n print_log_msg_withoutaux(\n it, cfg.max_iter, lr, time_meter, loss_meter)\n\n ## Save model every 1000 iterations\n if (it + 1) % 1000 == 0:\n save_pth = osp.join(cfg.respth, cfg.save_name)\n logger.info('\\nsave models to {}'.format(save_pth+str(it+1)))\n state = net.module.state_dict()\n if dist.get_rank() == 0: torch.save(state, save_pth+str(it+1))\n writer.add_scalar(\"train_loss_1000\", loss_pre, it)\n\n count = (iteration+1)//1000\n logger.info('\\nevaluating the models')\n classes = [\"Background\", \"Monorail\", \"Person\", \"Forklift\"]\n\n for i in range(count):\n save_pth = osp.join(cfg.respth, cfg.save_name)\n iteration = (i + 1) * 1000\n logger.info('\\n Iteration number:'+str(iteration))\n torch.cuda.empty_cache()\n ious_ss_eval, ious_mssc_eval, ious_mcf_eval, ious_msfc_eval, ious_ss_test, ious_mssc_test, ious_mcf_test, ious_msfc_test = evaluate(cfg, save_pth+str(iteration))\n\n\n for j in range(cfg.categories):\n writer.add_scalar(\"ss_class_iou_eval \"+ classes[j], ious_ss_eval.tolist()[j], iteration)\n writer.add_scalar(\"mssc_class_iou_eval \"+ classes[j], ious_mssc_eval.tolist()[j], iteration)\n writer.add_scalar(\"mcf_class_iou_eval \" + classes[j], ious_mcf_eval.tolist()[j], iteration)\n writer.add_scalar(\"msfc_class_iou_eval \" + classes[j], ious_msfc_eval.tolist()[j], iteration)\n writer.add_scalar(\"ss_class_iou_test \" + classes[j], ious_ss_test.tolist()[j], iteration)\n writer.add_scalar(\"mssc_class_iou_test \" + classes[j], ious_mssc_test.tolist()[j], iteration)\n writer.add_scalar(\"mcf_class_iou_test \" + classes[j], ious_mcf_test.tolist()[j], iteration)\n writer.add_scalar(\"msfc_class_iou_test \" + classes[j], ious_msfc_test.tolist()[j], iteration)\n\n return\n\n\ndef main():\n torch.cuda.set_device(args.local_rank)\n dist.init_process_group(\n backend='nccl',\n init_method='tcp://127.0.0.1:{}'.format(args.port),\n world_size=torch.cuda.device_count(),\n rank=args.local_rank\n )\n if not osp.exists(cfg.respth): os.makedirs(cfg.respth)\n setup_logger('{}-train'.format(cfg.model_type), cfg.respth)\n train()\n writer.flush()\n writer.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Eashwar93/Realtime-SemanticSegmentation","sub_path":"train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27050515613","text":"\n\nwith open('KakaoTalk_friend.txt', 'r', encoding='utf-8') as input_file:\n out=open('kakaotalk.txt','w')\n while(1):\n s=input_file.readline()\n if not s:\n break\n s=s.split(']')\n if(len(s)<3):\n continue\n out.write(s[2])\n out.close()\n\n","repo_name":"MubaBot/muba-chatbot","sub_path":"chatbot_api/Markov_response/kakaotalk_extract.py","file_name":"kakaotalk_extract.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36529413798","text":"import datetime\nfrom functools import cmp_to_key\nfrom datetime import datetime as dt\nfrom AppConfig import AppConfig\n\nif not AppConfig().isTest():\n from BirdNETLite import analyzeAudioData, prepareAudioSignal, parseArgs\nelse:\n from BirdNETLiteMOCK import analyzeAudioData, prepareAudioSignal, parseArgs\n\ndef getWeek():\n dtn = dt.now()\n return 
datetime.date(dtn.year, dtn.month, dtn.day).isocalendar().week\n\ndef getTimestamp():\n return dt.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef getDate():\n return getTimestamp().split(\" \")[0]\n\ndef getTime():\n return getTimestamp().split(\" \")[1]\n\ndef getNewArgMap(lat, lon, week=None):\n argMap = {\"lat\": 0, \"lon\": 0, \"week\": getWeek(), \"overlap\": 0.0}\n if (lat != None):\n argMap[\"lat\"] = lat\n if (lon != None):\n argMap[\"lon\"] = lon\n if (week != None):\n argMap[\"week\"] = week\n return argMap\n\ndef parseArgMap(argMap):\n \"\"\"\n input args as a map {\"lat\": 1, \"lon\": 2, \"week\": 3, \"overlap\": 0}\n \"\"\"\n argv = []\n for a in argMap:\n argv.append(\"--\" + a)\n argv.append(str(argMap[a]))\n return argv\n\n\ndef detectSpecies(sig=None, rate=None, argMap=None, interpreter=None, callbackProgress=None):\n\n parsedArgMap = parseArgMap(argMap)\n args = parseArgs(parsedArgMap)\n if (interpreter == None):\n assert interpreter != None\n\n audioData = prepareAudioSignal(sig, rate, args.overlap)\n week = max(1, min(args.week, 48))\n sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))\n detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap, interpreter, callbackProgress)\n\n return {\"detections\": detections, \"timestamp\": getTimestamp(), \"lat\": args.lat, \"lon\": args.lon}\n\n\ndef filterDetections(detections_result, p_limit=0.1, lang=\"en\"):\n count_total = 0\n count = 0\n detections = detections_result[\"detections\"]\n filtered_list = []\n for d in detections:\n detection = detections[d]\n for item in detection:\n count_total += 1\n names, p = item\n if (p > p_limit):\n count += 1\n values = names.split(\"_\")\n filtered_list.append([d, p, values[1], values[0]])\n return {\"count_total\": count_total, \n \"p_limit\": p_limit, \n \"count\": count, \n \"filtered_list\": filtered_list, \n \"timestamp\": detections_result[\"timestamp\"],\n \"lat\": detections_result[\"lat\"],\n \"lon\": detections_result[\"lon\"]\n }\n\ndef compare_sum(a,b):\n return a[0]-b[0]\n\ndef rankResult(filtered_result, desc=True):\n filtered_detections = filtered_result[\"filtered_list\"]\n sci_ix = 3\n sci_names = set(map(lambda x: x[sci_ix], filtered_detections))\n rank_list = []\n for sci_name in sci_names:\n detections_species = list(filter(lambda x: x[sci_ix] == sci_name, filtered_detections))\n p = []\n for d in detections_species:\n p.append(d[1])\n rank_list.append([sum(p), p, detections_species[0][2], detections_species[0][3]])\n rank_list.sort(key=cmp_to_key(compare_sum), reverse=desc)\n return {\"ranked_list\": rank_list,\n \"timestamp\": filtered_result[\"timestamp\"], \n \"lat\": filtered_result[\"lat\"], \n \"lon\": filtered_result[\"lon\"]}\n","repo_name":"gorlang/BirdifyApp","sub_path":"src/BirdifyAPI.py","file_name":"BirdifyAPI.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"1073632152","text":"import ops\nimport re\n\n__atom_pattern = re.compile(r'@'\n r'|'\n r'\\$[A-Za-z0-9_]+'\n r'|'\n r'\\d+[`°]\\d{0,2}\\'?\\d{0,2}\"?\\d*'\n r'|'\n r'\\d+\\.?\\d*'\n r'|'\n r'π'\n r'|'\n r'_?[+\\-*/%!^()\\[\\]{},;~\\:?<>.√×]'\n r'|'\n r'[A-Za-z]+'\n r'|'\n r'\\s+'\n )\n\nvar_format = re.compile(r'@|\\$[A-Za-z0-9]$')\n\nans = '@'\n\n\ndef __try_get(key, *funcs):\n for f in funcs:\n try:\n return f(key)\n except:\n pass\n raise KeyError\n\n\ndef translate(formula_str: str, oplist: ops.OpList)-> list:\n def _get_var(s):\n if 
re.match(var_format, s) is not None:\n return oplist[s]\n else:\n raise KeyError\n\n origin_list = re.findall(__atom_pattern, formula_str)\n formula = [oplist.head]\n for piece in origin_list:\n if re.match(r'\\s+', piece) is not None:\n pass\n else:\n if oplist.is_number(formula[-1]) or oplist.is_right_bracket(formula[-1]):\n try:\n t = __try_get(piece, oplist.get_right_bracket, oplist.get_binary)\n formula.append(t)\n except KeyError:\n try:\n t = __try_get(piece, oplist.get_left_bracket, oplist.get_const,\n oplist.string_to_real, oplist.get_unary, _get_var)\n formula.append(oplist.connector)\n formula.append(t)\n except KeyError:\n t = oplist.get_unary(oplist.postpos_unary_dict[piece])\n count = 0\n for i in range(len(formula) - 1, -1, -1):\n if oplist.is_right_bracket(formula[i]):\n count += 1\n elif oplist.is_left_bracket(formula[i]):\n count -= 1\n if count == 0:\n formula.insert(i, t)\n break\n else:\n formula.append(__try_get(piece, oplist.get_unary, oplist.get_const, oplist.string_to_real,\n oplist.get_left_bracket, oplist.get_right_bracket, _get_var))\n return formula\n\n","repo_name":"StarvinCulex/scicalc","sub_path":"formtrans.py","file_name":"formtrans.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"24680924249","text":"'''\nThere are cases when different processes need to access the same resource (properly instantiated).\nRace conditions can occur when two or more processes access a shared piece of data or resource simultaneously.\n'''\n\nimport time\nfrom multiprocessing import Process, Value\n\nr = 1000000\nbalance = Value('f', 200.00) # instance of shared resource (f:float)\n\n# process 1\ndef deposit(balance):\n for i in range(r):\n balance.value += 1.00 # critical section\n\n# process 2\ndef withdraw(balance):\n for i in range(r):\n balance.value -= 1.00 # critical section\n\n\nif __name__ == '__main__':\n\n # define two processes accessing the shared resource\n d = Process(target=deposit, args=(balance,)) \n w = Process(target=withdraw, args=(balance,))\n\n d.start()\n w.start()\n d.join()\n w.join()\n\n print(balance.value)\n # expected output is 200, but the actual result is random on each run","repo_name":"rpalloni/concurrency","sub_path":"9_race_conditions_process.py","file_name":"9_race_conditions_process.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"26835762083","text":"import base64\nfrom ipywidgets import CallbackDispatcher, Checkbox, CoreWidget, DOMWidget, register, Tab, VBox\nfrom traitlets import Bool, Bytes, Float, Int, List, TraitError, Unicode, validate\n\n\n@register\nclass NlLink(DOMWidget):\n \"\"\"A widget to display links.\n\n\n Parameters\n ----------\n value: str\n text to display for the link.\n href: str\n URL of the link.\n \"\"\"\n\n _view_name = Unicode(\"LinkView\").tag(sync=True)\n _model_name = Unicode('LinkModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n # value to appear as link text\n value = Unicode().tag(sync=True)\n # url of the link\n href = Unicode().tag(sync=True)\n\n # TODO check href to be a link\n\n\n@register\nclass NlProgress(DOMWidget):\n \"\"\"A widget to display 
progress as a horizontal bar.\n\n Parameters\n ----------\n value: float\n amount of progress.\n max: int\n maximum possible value.\n \"\"\"\n\n _view_name = Unicode(\"ProgressView\").tag(sync=True)\n _model_name = Unicode('ProgressModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n # actual value\n value = Float().tag(sync=True)\n # maximum value\n max = Int().tag(sync=True)\n\n @validate('value')\n def _valid_value(self, proposal):\n if proposal['value'] < 0:\n raise TraitError('Value should be greater than 0.')\n if proposal['value'] > self.max:\n raise TraitError(f\"Value should be less then max value {self.max}\")\n return proposal['value']\n\n\n@register\nclass NlCheckbox(Checkbox):\n \"\"\"A Checkbox widget that changes opacity when disabled.\n\n Parameters\n ----------\n opacity: float\n opacity value for the checkbox when disabled.\n \"\"\"\n\n _view_name = Unicode('NCheckboxView').tag(sync=True)\n _model_name = Unicode('NCheckboxModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n # opacity value to be used when disabled.\n opacity = Float(0.45).tag(sync=True)\n bg_color = Unicode('white').tag(sync=True)\n\n # TODO check opacity value.\n\n\n@register\nclass NlIconTab(Tab):\n _view_name = Unicode('IconTabView').tag(sync=True)\n _model_name = Unicode('IconTabModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n title_icons = List().tag(sync=True)\n\n def reset(self):\n self.unobserve_all()\n self.selected_index = None\n self.title_icons = []\n self._titles = {}\n self.children = []\n\n\n@register\nclass NlVBoxOverlay(VBox):\n \"\"\"A VBox widget that is viewed as overlay.\n\n Parameters\n ----------\n \"\"\"\n\n _view_name = Unicode('VBoxOverlayView').tag(sync=True)\n _model_name = Unicode('VBoxOverlayModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n\ndef content_to_json(pydt, manager):\n \"\"\"Serialize file content to json.\n \"\"\"\n if pydt is None:\n return None\n else:\n b64 = base64.encodebytes(pydt).decode()\n return b64\n\n\ncontent_serialization = {\n 'to_json': content_to_json\n}\n\n\n@register\nclass NlDownloadLink(DOMWidget, CoreWidget):\n \"\"\"A widget to download content as file with filename.\n\n\n Parameters\n ----------\n content: str\n content of the file as bytes\n filename: str\n file name\n mimetype: str\n text/csv by default\n description: str\n description for link\n tooltip: str\n tooltip to display when link hovered\n disabled: bool\n boolean value to indicate if the link is disabled\n \"\"\"\n\n _view_name = Unicode(\"DownloadLinkView\").tag(sync=True)\n _model_name = 
Unicode('DownloadLinkModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n content = Bytes().tag(sync=True, **content_serialization)\n mimetype = Unicode(\"text/csv\").tag(sync=True)\n filename = Unicode().tag(sync=True)\n description = Unicode().tag(sync=True)\n tooltip = Unicode(\"Download\").tag(sync=True)\n disabled = Bool(False).tag(sync=True)\n\n # the lines below are copied from the button widget to handle clicks on the link\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._click_handlers = CallbackDispatcher()\n self.on_msg(self._handle_button_msg)\n\n # this is necessary when the data is big and its content should not be set when the widget is initialized\n def on_click(self, callback, remove=False):\n \"\"\"Register a callback to execute when the button is clicked.\n The callback will be called with one argument, the clicked button\n widget instance.\n Parameters\n ----------\n remove: bool (optional)\n Set to true to remove the callback from the list of callbacks.\n \"\"\"\n self._click_handlers.register_callback(callback, remove=remove)\n\n def click(self):\n \"\"\"Programmatically trigger a click event.\n This will call the callbacks registered to the clicked button\n widget instance.\n \"\"\"\n self._click_handlers(self)\n\n def _handle_button_msg(self, _, content, buffers):\n \"\"\"Handle a msg from the front-end.\n Parameters\n ----------\n content: dict\n Content of the msg.\n \"\"\"\n if content.get('event', '') == 'click':\n self.click()\n","repo_name":"NeuroLang/neurolang_ipywidgets","sub_path":"neurolang_ipywidgets/cell_widgets.py","file_name":"cell_widgets.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"30928303869","text":"from typing import Tuple\n\nfrom .Users import UserList\nfrom ..utils.Logger import getServerLogger\nfrom ..utils.Middleware import Middleware\n\nlogger = getServerLogger(\"P2PMiddleware\")\n\n\nclass P2PMiddleware(Middleware):\n def __init__(self, users: UserList, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.users = users\n\n self.handlers = {\"addr_request\": self.get_user_addr}\n\n def get_user_addr(self, sid: str, data: dict) -> Tuple[bool, dict]:\n \"\"\"\n If an addr_request event arrives,\n look up the destination user's address and return it\n \"\"\"\n\n uri, uuid = None, None\n user = self.users.get_user_by_name(data[\"username\"])\n if user:\n uri, uuid = user.uri, user.uuid\n\n # This middleware does not need to pass the request along, so False is returned\n # (the response is already complete)\n return False, {\"uri\": uri, \"uuid\": uuid}\n","repo_name":"MrEarle/SSDD-T4","sub_path":"src/server/P2PMiddleware.py","file_name":"P2PMiddleware.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"12897685903","text":"import time\nstart = time.time()\ndef seperator(l):\n moves=0\n i=0\n j=1\n l1=l.copy()\n while(jl1[j]):\n i=j\n j+=1\n \n elif(l1[i]l2[j]):\n temp=l2[i]\n l2[i]=l2[j]\n l2[j]=temp\n moves1+=(j-i)\n i+=1\n j+=1\n \n return (l1,moves,l2,moves1)\n 
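# (Added note; an assumed reading of the code above) seperator appears to rearrange a copy of the input toward each of the two possible groupings of 0s and 1s, returning (l1, moves, l2, moves1): each rearranged list paired with its adjacent-swap count.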
\nprint(seperator([1,0,1,1,0,1,1]))\nend=time.time()\nprint()\nprint()\nprint(end-start)\n","repo_name":"DoWithPassion/PythonPrograms","sub_path":"swaps_to_seperate_0s_and1s.py","file_name":"swaps_to_seperate_0s_and1s.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"31711927770","text":"import math\nimport time\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.ui import Select\n\nbrowser = webdriver.Chrome(executable_path=ChromeDriverManager().install())\nlink = \"http://suninjuly.github.io/explicit_wait2.html\"\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\ntry:\n browser.get(link)\n price = browser.find_element_by_id('price').text\n my_price = WebDriverWait(browser, 12).until(\n EC.text_to_be_present_in_element(\n (By.ID, \"price\"), \"$100\")\n )\n book = browser.find_element_by_id('book')\n book.click()\n x = int(browser.find_element_by_id('input_value').text)\n my_answer = calc(x)\n browser.find_element_by_id('answer').send_keys(my_answer)\n button = browser.find_element_by_id(\"solve\")\n button.click()\n\nfinally:\n # gives us 10 seconds to copy the code\n time.sleep(10)\n # close the browser after all the manipulations\n browser.quit()\n","repo_name":"AkioYuki/stepik_auto_tests_course","sub_path":"2.4.1.py","file_name":"2.4.1.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"27610145827","text":"from thunderstore.community.models import Community, CommunitySite\nfrom thunderstore.core.utils import capture_exception\nfrom thunderstore.repository.api.v1.viewsets import serialize_package_list_for_community\nfrom thunderstore.repository.models.cache import APIV1PackageCache\n\n\ndef update_api_v1_caches() -> None:\n update_api_v1_indexes()\n\n\ndef update_api_v1_indexes() -> None:\n for site in CommunitySite.objects.iterator():\n try:\n APIV1PackageCache.update_for_community(\n community=site.community,\n content=serialize_package_list_for_community(\n community=site.community,\n ),\n )\n except Exception as e: # pragma: no cover\n capture_exception(e)\n for community in Community.objects.filter(sites=None).iterator():\n try:\n APIV1PackageCache.update_for_community(\n community=community,\n content=serialize_package_list_for_community(\n community=community,\n ),\n )\n except Exception as e: # pragma: no cover\n capture_exception(e)\n APIV1PackageCache.drop_stale_cache()\n","repo_name":"thunderstore-io/Thunderstore","sub_path":"django/thunderstore/repository/api/v1/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"77"}
{"seq_id":"29340036529","text":"#Decimal to Binary Conversion\n#receives a decimal number as input and prints the result of converting it to binary. 
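\n# A minimal reference sketch (added; to_binary is a hypothetical helper, not part of the original program):\n# repeated integer division by 2 collects the remainders, and reading them in reverse order\n# yields the binary digits.\ndef to_binary(n):\n bits = []\n while n > 0:\n bits.append(str(n % 2))\n n //= 2\n return \"\".join(reversed(bits)) if bits else \"0\"\n\nassert to_binary(10) == \"1010\" # 10 = 8 + 2 -> 0b1010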
\ndecimal= int(input(\"enter a decimal number: \"))\nlista_modulos = [ ]\nwhile decimal != 0:\n modulo = decimal % 2\n cociente = decimal // 2\n # print(\"QUOTIENT::: \"+ str(cociente))\n lista_modulos.append(str(modulo))\n decimal = cociente\nprint(\"result=\" + \"\".join(lista_modulos[::-1]))\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej4/hito1_ej4_edcf279b44328751cac4ce5efce99364.py","file_name":"hito1_ej4_edcf279b44328751cac4ce5efce99364.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"7880903155","text":"import os\nimport sys\nimport numpy as np\nimport pytest\n\nimport tvm\nimport tvm.relay.testing\nimport tvm.relay.transform as transform\nfrom tvm import relay\nfrom tvm.contrib import util\nfrom tvm.relay.annotation import compiler_begin, compiler_end\nfrom tvm.relay.expr_functor import ExprMutator\n\n# Leverage the pass manager to write a simple white list based annotator\n@transform.function_pass(opt_level=0)\nclass WhiteListAnnotator:\n def __init__(self, op_list, compiler):\n assert isinstance(op_list, (list, tuple, set))\n self.op_list = op_list\n self.compiler = compiler\n\n def transform_function(self, func, mod, ctx):\n\n annotator = self\n class Annotator(tvm.relay.ExprMutator):\n def visit_call(self, call):\n op_name = call.op.name\n if op_name in annotator.op_list:\n new_args = []\n for arg in call.args:\n ann = compiler_begin(super().visit(arg),\n annotator.compiler)\n new_args.append(ann)\n new_call = relay.Call(call.op, new_args, call.attrs,\n call.type_args)\n return compiler_end(new_call, annotator.compiler)\n else:\n return super().visit_call(call)\n return Annotator().visit(func)\n\n\nclass CcompilerAnnotator(ExprMutator):\n \"\"\"\n A simple annotator that creates the following program:\n |\n -- begin --\n |\n add\n |\n subtract\n |\n multiply\n |\n -- end --\n |\n \"\"\"\n\n def __init__(self):\n super(CcompilerAnnotator, self).__init__()\n self.in_compiler = 0\n\n def visit_call(self, call):\n if call.op.name == \"add\": # Annotate begin at args\n if self.in_compiler == 1:\n lhs = compiler_begin(super().visit(call.args[0]), \"ccompiler\")\n rhs = compiler_begin(super().visit(call.args[1]), \"ccompiler\")\n op = relay.add(lhs, rhs)\n self.in_compiler = 2\n return op\n elif call.op.name == \"subtract\":\n if self.in_compiler == 1:\n lhs = super().visit(call.args[0])\n rhs = super().visit(call.args[1])\n if isinstance(lhs, relay.expr.Var):\n lhs = compiler_begin(lhs, \"ccompiler\")\n if isinstance(rhs, relay.expr.Var):\n rhs = compiler_begin(rhs, \"ccompiler\")\n return relay.subtract(lhs, rhs)\n elif call.op.name == \"multiply\": # Annotate end at output\n self.in_compiler = 1\n lhs = super().visit(call.args[0])\n rhs = super().visit(call.args[1])\n if isinstance(lhs, relay.expr.Var):\n lhs = compiler_begin(lhs, \"ccompiler\")\n if isinstance(rhs, relay.expr.Var):\n rhs = compiler_begin(rhs, \"ccompiler\")\n op = relay.multiply(lhs, rhs)\n if self.in_compiler == 2:\n op = compiler_end(op, \"ccompiler\")\n self.in_compiler = 0\n return op\n return super().visit_call(call)\n\n\nclass WholeGraphAnnotator(ExprMutator):\n \"\"\"\n An annotator that creates a compiler for an entire graph.\n \"\"\"\n\n def __init__(self, compiler):\n super(WholeGraphAnnotator, self).__init__()\n self.compiler = compiler\n self.last_call = True\n\n def visit_call(self, call):\n curr_last = self.last_call\n self.last_call = False\n\n params = []\n for arg in 
call.args:\n param = super().visit(arg)\n if isinstance(param, relay.expr.Var):\n param = compiler_begin(param, self.compiler)\n params.append(param)\n\n new_call = relay.Call(call.op, params, call.attrs)\n if curr_last:\n new_call = compiler_end(new_call, self.compiler)\n return new_call\n\n\nclass MobileNetAnnotator(ExprMutator):\n \"\"\"\n Annotate mobilenet until global_avg_pool.\n \"\"\"\n\n def __init__(self, compiler):\n super(MobileNetAnnotator, self).__init__()\n self.compiler = compiler\n self.compiler_open = False\n\n def visit_call(self, call):\n\n if call.op.name == 'nn.global_avg_pool2d':\n self.compiler_open = True\n compiler_open = self.compiler_open\n\n params = []\n for arg in call.args:\n param = super().visit(arg)\n if call.op.name == 'nn.global_avg_pool2d':\n param = compiler_end(param, self.compiler)\n if compiler_open and isinstance(param, relay.expr.Var):\n param = compiler_begin(param, self.compiler)\n params.append(param)\n\n new_call = relay.Call(call.op, params, call.attrs)\n return new_call\n\n\ndef check_result(mod, map_inputs, out_shape, result, tol=1e-5, target=\"llvm\",\n ctx=tvm.cpu(), params=None):\n if sys.platform == \"win32\":\n print(\"Skip test on Windows for now\")\n return\n\n def update_lib(lib):\n test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))\n source_dir = os.path.join(test_dir, \"..\", \"..\", \"..\")\n contrib_path = os.path.join(source_dir, \"src\", \"runtime\", \"contrib\")\n\n kwargs = {}\n kwargs[\"options\"] = [\"-O2\", \"-std=c++11\", \"-I\" + contrib_path]\n tmp_path = util.tempdir()\n lib_name = 'lib.so'\n lib_path = tmp_path.relpath(lib_name)\n lib.export_library(lib_path, fcompile=False, **kwargs)\n lib = tvm.module.load(lib_path)\n\n return lib\n\n def check_vm_result():\n with relay.build_config(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n exe = relay.vm.compile(mod, target=target, params=params)\n code, lib = exe.save()\n lib = update_lib(lib)\n exe = relay.vm.Executable.load_exec(code, lib)\n vm = relay.vm.VirtualMachine(exe)\n vm.init(ctx)\n out = vm.run(**map_inputs)\n tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)\n\n def check_graph_runtime_result():\n with relay.build_config(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n json, lib, param = relay.build(mod, target=target, params=params)\n lib = update_lib(lib)\n rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)\n\n for name, data in map_inputs.items():\n rt_mod.set_input(name, data)\n rt_mod.set_input(**param)\n rt_mod.run()\n out = tvm.nd.empty(out_shape, ctx=ctx)\n out = rt_mod.get_output(0, out)\n\n tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)\n\n check_vm_result()\n check_graph_runtime_result()\n\n\ndef test_multi_node_compiler():\n x = relay.var('x', shape=(10, 10))\n w0 = relay.var('w0', shape=(10, 10))\n w1 = relay.var('w1', shape=(10, 10))\n w2 = relay.var('w2', shape=(10, 10))\n w3 = relay.var('w3', shape=(10, 10))\n w4 = relay.var('w4', shape=(10, 10))\n w5 = relay.var('w5', shape=(10, 10))\n w6 = relay.var('w6', shape=(10, 10))\n w7 = relay.var('w7', shape=(10, 10))\n\n # C compiler\n # FIXME: We generate two compilers for this case but they should be merged to one\n # due to the common input (x).\n z0 = relay.add(x, w0)\n p0 = relay.subtract(z0, w1)\n q0 = relay.multiply(p0, w2)\n\n z1 = relay.add(x, w3)\n p1 = relay.subtract(z1, w4)\n q1 = relay.multiply(p1, w5)\n\n # Other parts on TVM\n z2 = relay.add(x, w6)\n q2 = relay.subtract(z2, w7)\n\n r = 
relay.concatenate((q0, q1, q2), axis=0)\n f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)\n mod = relay.Module()\n ann = CcompilerAnnotator()\n mod[\"main\"] = ann.visit(f)\n mod = transform.PartitionGraph()(mod)\n mod = transform.InferType()(mod)\n\n x_data = np.random.rand(10, 10).astype('float32')\n w_data = []\n for _ in range(8):\n w_data.append(np.random.rand(10, 10).astype('float32'))\n\n map_inputs = {\"w{}\".format(i): w_data[i] for i in range(8)}\n map_inputs[\"x\"] = x_data\n check_result(\n mod, map_inputs, (30, 10),\n np.concatenate((((x_data + w_data[0]) - w_data[1]) * w_data[2],\n ((x_data + w_data[3]) - w_data[4]) * w_data[5],\n x_data + w_data[6] - w_data[7]),\n axis=0))\n\n\ndef test_extern_ccompiler_single_op():\n @transform.function_pass(opt_level=0)\n class MyAnnotator:\n def transform_function(self, func, mod, ctx):\n class Annotator(tvm.relay.ExprMutator):\n def visit_call(self, call):\n new_args = []\n for arg in call.args:\n ann = compiler_begin(self.visit(arg), \"ccompiler\")\n new_args.append(ann)\n new_call = relay.Call(call.op, new_args)\n return compiler_end(new_call, \"ccompiler\")\n return Annotator().visit(func)\n\n x = relay.var('x', shape=(8, 8))\n y = relay.var('y', shape=(8, 8))\n z = x + y\n f = relay.Function([x, y], z)\n x_data = np.random.rand(8, 8).astype('float32')\n y_data = np.random.rand(8, 8).astype('float32')\n mod = relay.Module()\n mod[\"main\"] = f\n mod = MyAnnotator()(mod)\n mod = transform.PartitionGraph()(mod)\n\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (8, 8), x_data + y_data)\n\n\ndef test_extern_ccompiler_default_ops():\n def expected():\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n x0 = relay.var(\"x0\", shape=(8, 8))\n y0 = relay.var(\"y0\", shape=(8, 8))\n add = x0 + y0\n # Function that uses C compiler\n func = relay.Function([x0, y0], add)\n func = func.set_attribute(\"Primitive\", tvm.expr.IntImm(\"int32\", 1))\n func = func.set_attribute(\"Compiler\",\n tvm.expr.StringImm(\"ccompiler\"))\n func = func.set_attribute(\"ExternalSymbol\",\n tvm.expr.StringImm(\"ccompiler_0\"))\n add_call = relay.Call(func, [x, y])\n # Function that uses default compiler. 
Ops are fused in this function.\n p0 = relay.var(\"p0\", shape=(8, 8))\n log = relay.log(p0)\n exp = relay.exp(p0)\n concat = relay.concatenate([log, exp], axis=0)\n fused_func = relay.Function([p0], concat)\n fused_func = fused_func.set_attribute(\"Primitive\",\n tvm.expr.IntImm(\"int32\", 1))\n fused_call = relay.Call(fused_func, [add_call])\n main = relay.Function([x, y], fused_call)\n mod = relay.Module()\n mod[\"main\"] = main\n return mod\n\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n add = x + y\n log = relay.log(add)\n exp = relay.exp(add)\n concat = relay.concatenate([log, exp], axis=0)\n f = relay.Function([x, y], concat)\n mod = relay.Module()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n\n fused_mod = transform.FuseOps(2)(mod)\n expected_mod = expected()\n assert relay.alpha_equal(fused_mod, expected_mod)\n\n x_data = np.random.rand(8, 8).astype('float32')\n y_data = np.random.rand(8, 8).astype('float32')\n np_add = x_data + y_data\n res = np.concatenate([np.log(np_add), np.exp(np_add)])\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (16, 8), res)\n\n\ndef test_extern_ccompiler():\n x = relay.var('x', shape=(2, 2))\n y = relay.var('y', shape=(2, 2))\n z = x + x\n p = y * y\n f = relay.Function([x, y], p - z)\n x_data = np.random.rand(2, 2).astype('float32')\n y_data = np.random.rand(2, 2).astype('float32')\n mod = relay.Module()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (2, 2), (y_data * y_data) - (x_data + x_data))\n\n\ndef test_extern_dnnl():\n if not tvm.get_global_func(\"relay.ext.dnnl\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = 'float32'\n ishape = (1, 32, 14, 14)\n w1shape = (32, 1, 3, 3)\n data = relay.var('data', shape=(ishape), dtype=dtype)\n weight1 = relay.var('weight1', shape=(w1shape), dtype=dtype)\n depthwise_conv2d_1 = relay.nn.conv2d(data,\n weight1,\n kernel_size=(3, 3),\n padding=(1, 1),\n groups=32)\n depthwise_conv2d_2 = relay.nn.conv2d(depthwise_conv2d_1,\n weight1,\n kernel_size=(3, 3),\n padding=(1, 1),\n groups=32)\n out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)\n\n f = relay.Function([data, weight1], out)\n\n mod = relay.Module()\n mod['main'] = WholeGraphAnnotator('dnnl').visit(f)\n mod = transform.PartitionGraph()(mod)\n\n ref_mod = relay.Module()\n ref_mod['main'] = f\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)\n\n ref_ex = relay.create_executor(\"graph\", mod=ref_mod, ctx=tvm.cpu())\n ref_res = ref_ex.evaluate()(i_data, w1_data)\n check_result(mod, {\"data\": i_data, \"weight1\": w1_data},\n (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5)\n\n\ndef test_extern_dnnl_mobilenet():\n if not tvm.get_global_func(\"relay.ext.dnnl\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = 'float32'\n ishape = (1, 3, 224, 224)\n mod, params = relay.testing.mobilenet.get_workload(\n batch_size=1, dtype='float32')\n\n op_list = [\"nn.conv2d\", \"nn.dense\", \"nn.relu\", \"add\"]\n mod = WhiteListAnnotator(op_list, \"dnnl\")(mod)\n mod = transform.PartitionGraph()(mod)\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n\n ref_mod, params = relay.testing.mobilenet.get_workload(batch_size=1,\n dtype='float32')\n 
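# (Added note) ref_mod is the same MobileNet workload without partitioning; check_result\n # below compares its CPU output against the DNNL-partitioned module (tol=1e-5).\n 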
ref_ex = relay.create_executor(\"graph\", mod=ref_mod, ctx=tvm.cpu(0))\n ref_res = ref_ex.evaluate()(i_data, **params)\n\n check_result(mod, {\"data\": i_data},\n (1, 1000), ref_res.asnumpy(), tol=1e-5, params=params)\n\n\nif __name__ == \"__main__\":\n test_multi_node_compiler()\n test_extern_ccompiler_single_op()\n test_extern_ccompiler_default_ops()\n test_extern_ccompiler()\n test_extern_dnnl()\n test_extern_dnnl_mobilenet()\n","repo_name":"gary30404/tvm-yolov3","sub_path":"tests/python/relay/test_pass_partition_graph.py","file_name":"test_pass_partition_graph.py","file_ext":"py","file_size_in_byte":14780,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"41567791867","text":"import wx\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport core.load as load\n\n\ndef readout_noise_process(mainFrame):\n gain = float(mainFrame.gain_rdnPage.rdn_textCtrl1.GetValue())\n nClip = int(mainFrame.gain_rdnPage.rdn_textCtrl3.GetValue())\n path = mainFrame.biasfilePath\n files = []\n\n if isinstance(path, str):\n # 输入路径为str,即目录\n for file in os.listdir(path):\n files.append(os.path.join(path, file))\n\n if isinstance(path, list):\n # 输入路径为list, 即文件名\n files = path\n\n # 判断nClip是否合适\n if len(files) <= 2 * nClip:\n dlg = wx.MessageDialog(None, \"请输入合适的nClip!\", caption=\"警告\", style=wx.OK)\n dlg.ShowModal()\n return\n\n tmp = []\n # arr = np.zeros(load.getData(MainFrame, files[0]).shape)\n for file in files:\n each_data = load.getData(mainFrame, file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n tmp = each_data\n else:\n tmp.append(each_data)\n\n # 全部图像堆叠\n data = np.array(tmp)\n # 剔除最大值和最小是\n data_sort = np.sort(data, axis=0)\n if nClip:\n data_clip = data_sort[nClip:-nClip, :, :]\n else:\n data_clip = data_sort\n # 计算N张本底图像各个像元的平均值\n data_mean = np.mean(data_clip, axis=0)\n # 计算N张本地图像各个像元的标准偏差作为该像元的读出噪声\n data_std = np.std(data_clip, axis=0)\n # 整个图像的平均读出噪声\n data_std_overall = np.mean(data_std)\n # 读出噪声结果(e-)\n res = gain * data_std_overall\n\n # 显示结果\n mainFrame.gain_rdnPage.rdn_textCtrl2.SetValue(str(round(res, 3)))\n\n # 合并后的fits图像的平均值和标准差分布作图, 以及直方图\n plt.figure(1)\n plt.title(\"MeanValue Distribution\")\n plt.axis('off')\n plt.imshow(data_mean, cmap=plt.cm.gray)\n\n plt.figure(2)\n plt.title(\"StdValue Distribution\")\n plt.imshow(data_std, cmap=plt.cm.gray)\n plt.axis('off')\n\n plt.figure(3)\n\n n, bins, patches = plt.hist(data_std.flatten(), bins='auto', color='steelblue')\n plt.title(\"Readout Noise Historgam\")\n plt.xlabel(\"Readout Noise (e-)\")\n plt.ylabel(\"Counts\")\n plt.tight_layout()\n\n plt.show()\n\n return\n\n\ndef gain_process(mainFrame):\n bias_path = mainFrame.biasfilePath\n flat_path = mainFrame.flatfilePath\n bias_files = []\n flat_files = []\n if isinstance(bias_path, str):\n # 输入路径为str,即目录\n for bias_file in os.listdir(bias_path):\n bias_files.append(os.path.join(bias_path, bias_file))\n\n if isinstance(bias_path, list):\n # 输入路径为list, 即文件名\n bias_files = bias_path\n\n if isinstance(flat_path, str):\n # 输入路径为str,即目录\n for flat_file in os.listdir(flat_path):\n flat_files.append(os.path.join(flat_path, flat_file))\n\n if isinstance(flat_path, list):\n # 输入路径为list, 即文件名\n flat_files = flat_path\n\n bias_tmp = []\n flat_tmp = []\n\n # 读取本底场图像\n for bias_file in bias_files:\n each_data = load.getData(mainFrame, bias_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n bias_tmp = each_data\n else:\n 
bias_tmp.append(each_data)\n # 读取平场图像\n for flat_file in flat_files:\n each_data = load.getData(mainFrame, flat_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n flat_tmp = each_data\n else:\n flat_tmp.append(each_data)\n # 图像堆叠转为array\n bias_data = np.array(bias_tmp, dtype=float)\n flat_data = np.array(flat_tmp, dtype=float)\n\n # 选择两幅平均值最接近的图像\n bias1, bias2 = load.im_select(bias_data)\n flat1, flat2 = load.im_select(flat_data)\n\n bias_diff = bias1 - bias2\n flat_diff = flat1 - flat2\n\n bias_dif_var = np.var(bias_diff)\n flat_diff_var = np.var(flat_diff)\n\n # 计算增益\n gain = (np.mean(flat1) + np.mean(flat2) - np.mean(bias1) - np.mean(bias2)) / (flat_diff_var - bias_dif_var)\n\n # 显示结果\n mainFrame.gain_rdnPage.gain_textCtrl.SetValue(str(round(gain, 3)))\n return\n\n\ndef ptc_process(mainFrame):\n bias_path = mainFrame.biasfilePath\n flat_path = mainFrame.flatfilePath\n bias_files = []\n flat_files = []\n if isinstance(bias_path, str):\n # 输入路径为str,即目录\n for bias_file in os.listdir(bias_path):\n bias_files.append(os.path.join(bias_path, bias_file))\n\n if isinstance(bias_path, list):\n # 输入路径为list, 即文件名\n bias_files = bias_path\n\n if isinstance(flat_path, str):\n # 输入路径为str,即目录\n for flat_file in os.listdir(flat_path):\n flat_files.append(os.path.join(flat_path, flat_file))\n\n if isinstance(flat_path, list):\n # 输入路径为list, 即文件名\n flat_files = flat_path\n\n bias_tmp = []\n flat_tmp = []\n\n # 读取本底场图像\n for bias_file in bias_files:\n each_data = load.getData(mainFrame, bias_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n bias_tmp = each_data\n else:\n bias_tmp.append(each_data)\n # 图像堆叠转为array\n bias_data = np.array(bias_tmp, dtype=float)\n # 选择两幅平均值最接近的图像\n bias1, bias2 = load.im_select(bias_data)\n bias_mean = (np.mean(bias1) + np.mean(bias2)) / 2\n bias_var = np.var(bias1 - bias2) / 2\n\n # 读取平场图像\n PTC_arr = np.zeros((2, len(flat_files)))\n index = 0\n\n for flat_file in flat_files:\n flat_tmp = []\n if os.path.isdir(flat_file):\n for item in os.listdir(flat_file):\n tmp_path = os.path.join(flat_file, item)\n flat_tmp.append(load.getData(mainFrame, tmp_path))\n else:\n each_data = load.getData(mainFrame, flat_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n flat_tmp = each_data\n else:\n flat_tmp.append(each_data)\n flat_data = np.array(flat_tmp, dtype=float)\n flat1, flat2 = load.im_select(flat_data)\n flat_mean = (np.mean(flat1) + np.mean(flat2)) / 2\n flat_var = np.var(flat1 - flat2) / 2\n\n PTC_arr[0, index] = flat_mean - bias_mean\n PTC_arr[1, index] = flat_var - bias_var\n index += 1\n\n # 找拐点\n argMax = PTC_arr[1, :].argmax()\n PTC_arr_fit = PTC_arr[:, :argMax + 1]\n\n # 拟合\n X_fit = PTC_arr_fit[0, :]\n Y_fit = PTC_arr_fit[1, :]\n Z_fit = np.polyfit(X_fit, Y_fit, 1)\n # 原始数据\n X_origin = PTC_arr[0, :argMax + 1]\n Y_origin = PTC_arr[1, :argMax + 1]\n\n # z[0]为曲线斜率, 1/z[0]为增益\n gain = 1 / Z_fit[0]\n readout_noise = np.sqrt(bias_var) * gain\n fullWellCapacity = gain * PTC_arr[0, argMax]\n\n # PTC 非线性度\n p = np.poly1d(Z_fit)\n delta = []\n for i in range(len(X_origin)):\n delta.append((Y_origin[i] - p(X_origin[i])) / p(X_origin[i]))\n delta = np.array(delta, dtype=float)\n none_linearity = np.mean(np.abs(delta))\n\n # 响应非线性度\n maxExposure = float(mainFrame.ptcPage.ptc_textCtrl1.GetValue())\n length = PTC_arr.shape[1]\n arr_exposure = np.linspace(0, maxExposure, length)\n coord_max = PTC_arr[1, :].argmax()\n response_x = arr_exposure[:coord_max + 
1]\n response_y = PTC_arr[0, :coord_max + 1]\n response_fit = np.polyfit(response_x, response_y, 1)\n response_poly = np.poly1d(response_fit)\n response_yp = response_poly(response_x)\n res_non_linearity = np.mean((response_y - response_yp) / response_yp)\n\n # 显示结果\n mainFrame.ptcPage.ptc_textCtrl3.SetValue(str(round(gain, 4)))\n mainFrame.ptcPage.ptc_textCtrl4.SetValue(str(round(readout_noise, 4)))\n mainFrame.ptcPage.ptc_textCtrl5.SetValue(str(round(fullWellCapacity, 2)))\n mainFrame.ptcPage.ptc_textCtrl6.SetValue(str(np.abs(round(none_linearity, 2))))\n mainFrame.ptcPage.ptc_textCtrl7.SetValue(str(np.abs(round(res_non_linearity, 2))))\n\n # 作图\n if mainFrame.ptcPage.ptc_plot_trigger.GetValue():\n # PTC\n plt.figure(1)\n l1, = plt.plot(PTC_arr[0, :], PTC_arr[1, :], 'b*', label='Original Data')\n l2, = plt.plot(X_origin, p(X_origin), 'r--', label='Fit Curve')\n plt.legend(loc='best')\n plt.xlabel('Mean(ADU)')\n plt.ylabel('Variance')\n plt.legend(loc='best')\n plt.title('PTC')\n\n # PTC non-linearity\n plt.figure(2)\n l1, = plt.plot(X_origin, delta, 'b*', label='Non-linearity')\n l2, = plt.plot(X_origin, np.zeros(len(X_origin)), 'r--', label='zero line')\n plt.legend(loc='best')\n plt.xlabel('Mean(ADU)')\n plt.ylabel('Non-linearity')\n plt.title('PTC Non-linearity')\n\n # PTC response\n plt.figure(3)\n l1, = plt.plot(arr_exposure, PTC_arr[0, :], \"b*\", label='Original Data')\n l2, = plt.plot(response_x, response_yp, 'r--', label='Fit Curve')\n plt.legend(loc='best')\n plt.xlabel('Exposure Time / (s)')\n plt.ylabel('Signal / MeanValue')\n plt.title('PTC Response')\n\n # PTC response non-\n plt.figure(4)\n l1, = plt.plot(response_x, (response_y - response_yp) / response_yp, 'b*', label='Non-linearity')\n l2, = plt.plot(response_x, np.zeros(len(response_x)), 'r--', label='zero line')\n plt.legend(loc='best')\n plt.xlabel('Exposure Time / (s)')\n plt.ylabel('Non-linearity')\n plt.title('Response Non-linearity')\n\n plt.show()\n return\n\n\ndef dark_current_process(mainFrame):\n gain = float(mainFrame.darkcurrentPage.dc_textCtrl1.GetValue())\n time = float(mainFrame.darkcurrentPage.dc_textCtrl2.GetValue())\n bias_path = mainFrame.biasfilePath\n dark_path = mainFrame.darkfilePath\n bias_files = []\n dark_files = []\n\n if isinstance(bias_path, str):\n # 输入路径为str,即目录\n for bias_file in os.listdir(bias_path):\n bias_files.append(os.path.join(bias_path, bias_file))\n\n if isinstance(bias_path, list):\n # 输入路径为list, 即文件名\n bias_files = bias_path\n\n if isinstance(dark_path, str):\n # 输入路径为str,即目录\n for dark_file in os.listdir(dark_path):\n dark_files.append(os.path.join(dark_path, dark_file))\n\n if isinstance(dark_path, list):\n # 输入路径为list, 即文件名\n dark_files = dark_path\n\n bias_tmp = []\n dark_tmp = []\n # arr = np.zeros(load.getData(MainFrame, files[0]).shape)\n # 读取本底场图像\n for bias_file in bias_files:\n each_data = load.getData(mainFrame, bias_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n bias_tmp = each_data\n else:\n bias_tmp.append(each_data)\n # 读取暗场图像\n for dark_file in dark_files:\n each_data = load.getData(mainFrame, dark_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n dark_tmp = each_data\n else:\n dark_tmp.append(each_data)\n # 图像堆叠转为array\n bias_data = np.array(bias_tmp)\n dark_data = np.array(dark_tmp)\n\n bias_mean = load.im_combine(bias_data)\n dark_mean = load.im_combine(dark_data)\n\n # 暗电流平均值\n res_darkCurrent = np.mean(dark_mean - bias_mean)\n\n # 计算热像元\n # 取第一帧暗场图像\n 
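# (Added note) a pixel is flagged as hot when its bias-subtracted dark value\n # exceeds the frame mean by 25; rows/columns with more than 100 flagged pixels\n # are reported as defective lines.\n 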
dark_firstFrame = dark_data[0, :, :]\n dark_res = dark_firstFrame - bias_mean\n # compute the threshold\n threshold_val = np.mean(dark_res) + 25\n # number of hot pixels\n coords = np.where(dark_res >= threshold_val)\n hot_pixel_num = coords[0].shape[0]\n count_row = np.unique(coords[0], return_counts=True)\n count_col = np.unique(coords[1], return_counts=True)\n # defective rows/columns of hot pixels\n hot_pixel_row = count_row[0][np.where(count_row[1] > 100)]\n hot_pixel_col = count_col[0][np.where(count_col[1] > 100)]\n\n # ratio of pixels exceeding 4x the typical dark-current value\n coord_hotPixel_over4x = np.where(dark_res > 4 * res_darkCurrent)\n ratio_over4x = coord_hotPixel_over4x[0].shape[0] / dark_res.size\n\n # display the results\n mainFrame.darkcurrentPage.dc_textCtrl3.SetValue(str(round(res_darkCurrent, 6) / time * gain))\n mainFrame.darkcurrentPage.dc_textCtrl4.SetValue(str(hot_pixel_num))\n mainFrame.darkcurrentPage.dc_textCtrl5.SetValue(str(round(ratio_over4x, 5)))\n mainFrame.darkcurrentPage.dc_textCtrl6.SetValue(\",\".join(map(str, hot_pixel_row)))\n mainFrame.darkcurrentPage.dc_textCtrl6.SetValue(\",\".join(map(str, hot_pixel_col)))\n return\n\n\ndef prnu_process(mainFrame):\n bias_path = mainFrame.biasfilePath\n flat_path = mainFrame.flatfilePath\n bias_files = []\n flat_files = []\n if isinstance(bias_path, str):\n # the input path is a str, i.e. a directory\n for bias_file in os.listdir(bias_path):\n bias_files.append(os.path.join(bias_path, bias_file))\n\n if isinstance(bias_path, list):\n # the input path is a list, i.e. file names\n bias_files = bias_path\n\n if isinstance(flat_path, str):\n # the input path is a str, i.e. a directory\n for flat_file in os.listdir(flat_path):\n flat_files.append(os.path.join(flat_path, flat_file))\n\n if isinstance(flat_path, list):\n # the input path is a list, i.e. file names\n flat_files = flat_path\n\n bias_tmp = []\n flat_tmp = []\n\n # read the bias frames\n for bias_file in bias_files:\n each_data = load.getData(mainFrame, bias_file)\n # stacking multiple 3-D FITS files remains to be resolved\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n bias_tmp = each_data\n else:\n bias_tmp.append(each_data)\n # read the flat-field frames\n for flat_file in flat_files:\n each_data = load.getData(mainFrame, flat_file)\n # stacking multiple 3-D FITS files remains to be resolved\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n flat_tmp = each_data\n else:\n flat_tmp.append(each_data)\n # convert the stacked images to an array\n bias_data = np.array(bias_tmp, dtype=float)\n flat_data = np.array(flat_tmp, dtype=float)\n\n # select the two images whose mean values are closest\n bias1, bias2 = load.im_select(bias_data)\n flat1, flat2 = load.im_select(flat_data)\n\n bias_diff = bias1 - bias2\n flat_diff = flat1 - flat2\n\n bias_dif_var = np.var(bias_diff)\n flat_diff_var = np.var(flat_diff)\n\n res_prnu = np.sqrt(2) * np.sqrt(\n np.var(flat1) + np.var(flat2) - np.var(bias1) - np.var(bias2) - (flat_diff_var - bias_dif_var)) \\\n / (np.mean(flat1) + np.mean(flat2) - np.mean(bias1) - np.mean(bias2))\n\n mainFrame.prnuPage.prnu_textCtrl1.SetValue(str(round(res_prnu, 4)))\n return\n","repo_name":"bitursa/CCD-Data-Process","sub_path":"core/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":15309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"72799230649","text":"'''\nWhile DFS uses a set, for BFS we use a queue\nFor each item that we pop off the queue, we find its unvisited neighbors and add them to the end of the queue\n'''\n\nfrom collections import deque\n\ndef BFS(graph, start, visited=None):\n if visited is None:\n visited = set()\n queue = deque([start])\n\n while queue:\n vertex = queue.popleft()\n visited.add(vertex)\n for neighbor in graph[vertex]:\n if neighbor not in visited:\n queue.append(neighbor)\n \n return 
visited\n","repo_name":"LennyGonz/LeetCode-Questions","sub_path":"Patterns/Tree_Breadth_First_Search/0_Understand_BFS.py","file_name":"0_Understand_BFS.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"2885573106","text":"\"\"\"\nInsertion sort\n\n\nInsertion sort involves finding the right place for a given element in a sorted list. So in the beginning we compare the first two elements \nand sort them by comparing them. Then we pick the third element and find its proper position among the previous two sorted elements. \nThis way we gradually go on adding more elements to the already sorted list by putting them in their proper position.\n\n\"\"\"\n\"\"\"\npseudo code\n\nprocedure insertionSort( A : array of items )\nint holePosition\n int valueToInsert\n\t\n for i = 1 to length(A) inclusive do:\n\t\n /* select value to be inserted */\n valueToInsert = A[i]\n holePosition = i\n \n /*locate hole position for the element to be inserted */\n\t\t\n while holePosition > 0 and A[holePosition-1] > valueToInsert do:\n A[holePosition] = A[holePosition-1]\n holePosition = holePosition -1\n end while\n\t\t\n /* insert the number at hole position */\n A[holePosition] = valueToInsert\n \n end for\n\t\nend procedure\n\"\"\"\n\n\n#Algorithm\ndef insertion_sort(InputList):\n for i in range(1, len(InputList)):\n j = i-1\n nxt_element = InputList[i]\n\t\t\n while (j >= 0) and (InputList[j] > nxt_element):\n InputList[j+1] = InputList[j]\n j=j-1\n InputList[j+1] = nxt_element\n\nunsorted_list = []\nnum = int(input(\"Enter number of elements\"))\n\nprint(f'Enter {num} elements')\n\nfor i in range(num):\n data = int(input(f'{i+1}. '))\n unsorted_list.append(data)\nprint(f'Unsorted list is {unsorted_list}')\ninsertion_sort(unsorted_list)\nprint(f'Sorted list is {unsorted_list}')\n","repo_name":"DSCTOCE/Algorithms","sub_path":"insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
{"seq_id":"37130806345","text":"def selection(x):\n n = len(x)\n for i in range(0, n-1):\n ordenado = True\n for j in range(i+1, n):\n if x[i]>x[j]:\n x[i], x[j] = x[j], x[i]\n ordenado = False\n if ordenado:\n return x\n return x\nx = [2, 7, 8, 1, 3, 6]\nprint(\"End of algorithm -\", selection(x))\n","repo_name":"marceloarantes19/estruturaDeDados2022","sub_path":"ordenacaoNaoOtima/selectionSort2.py","file_name":"selectionSort2.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
{"seq_id":"9699146789","text":"import requests # , time, xml\nfrom bs4 import BeautifulSoup\nimport datetime as dt\nimport re, feedparser\n\n\ndef fix_string(text, k):\n text = text.strip()\n pattern = r'[a-z][A-Z]|\\d[A-Z]|[а-я][А-Я]|[а-я][a-z]|\\d[a-z]|\\d[а-я]|[а-я][A-Z]|[a-z][а-я]'\n result_find = re.findall(pattern, text)\n for match in result_find:\n text = text.replace(match, ' '.join(match))\n if 'https://www.kommersant.ru' in text and 'Коммерсант' in k:\n text = text.split('https://www.kommersant.ru')[0]\n if text.startswith('Темная Удмуртия:'):\n text = text.lstrip('Темная Удмуртия:')\n text = text.strip()\n return text\n\ndef feeding_date(url):\n feeds = feedparser.parse(url)\n\n working_date = dt.datetime.today() - dt.timedelta(7)\n i = 1\n for feed in feeds.entries:\n datetime_object = 
dt.datetime.strptime(str(feed.published).rstrip(\" GMT\"), '%a, %d %b %Y %H:%M:%S')\n # published_date = dt.datetime.today().strftime('%a, %d %B %Y %H:%M:%S')\n if datetime_object < working_date:\n break\n i += 1\n return i\n\ndef parsing_udm_gov(url):\n i = 0\n udm_news = []\n for k, v in url.items():\n count = feeding_date(v)\n resp = requests.get(v)\n soup_gov = BeautifulSoup(resp.text, 'lxml')\n description_news = []\n for tagD in soup_gov.find_all('description')[:count]:\n content_news = fix_string(tagD.text, k)\n if 100 < len(content_news) and content_news.startswith('Forwarded From') == False:\n\n if 'Коммерсант' in k and 'бизнес-завтрак' not in content_news \\\n and 'бизнес-пикник' not in content_news \\\n and 'кругл' not in content_news \\\n and 'стол' not in content_news:\n # print(k)\n i += 1\n description_news.append(content_news)\n elif 'kommersant' not in content_news and 'Коммерсант' not in k:\n i += 1\n description_news.append(content_news)\n\n\n\n\n for descript in description_news:\n descript = descript.replace('\"', '\\'').rstrip(']]>')\n article = (descript[:35] + '...') # if len(descript) > 75 else (descript[:25] + '...')\n pos = article.find('http')\n article = article[:pos - 1] + '...'\n article = re.sub(r'http[^(\\s)]+', '...', article)\n date = dt.datetime.now().strftime(\"%d-%m-%y %H:%M:%S\")\n content = (article, descript, k, date)\n udm_news.append(content)\n return udm_news\n\n","repo_name":"krastykovyaz/vk_wall_poster","sub_path":"parsing_news.py","file_name":"parsing_news.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"24386353106","text":"# coding=utf-8\n'''\nMethod 1: starting with the third number in nums, insert it into the gaps of the previous results to form new permutations, until every number in nums has been inserted.\ne.g. the full permutations of the first two numbers are [[|0|1|], [|1|0|]]; the third number can be inserted at any | position, giving six combinations in total.\n\nMethod 2: keep a used array recording whether each number is already in item; if not, append it to item. When item is as long as nums, every number has been inserted, so add\nitem to the result res, then pop the last number from item and keep traversing. This approach first fixes the first number and permutes the numbers after it.\n'''\n\n\ndef permute(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if not nums:\n return []\n n = nums.__len__()\n if n == 1:\n return nums\n buf_ans = [[nums[0], nums[1]], [nums[1], nums[0]]]\n res = buf_ans\n index = 2\n for v in nums[2:]:\n res = []\n for buf in buf_ans:\n for i in range(index + 1):\n res.append(buf[:i] + [v] + buf[i:])\n index += 1\n buf_ans = res\n return res\n\n\ndef permute1(nums):\n def helper(num, used, item, res):\n if item.__len__() == num.__len__():\n res.append(item[:])\n return\n for i in range(num.__len__()):\n if not used[i]:\n used[i] = True\n item.append(num[i])\n helper(num, used, item, res)\n item.pop()\n used[i] = False\n\n if not nums:\n return nums\n res = []\n used = [False for _ in range(nums.__len__())]\n item = []\n helper(nums, used, item, res)\n return res\n\ndef permute2(nums):\n import itertools\n return [i for i in itertools.permutations(nums)]\n\n\nprint(permute2([0, 1, 2]))\n","repo_name":"ChangXiaodong/Leetcode-solutions","sub_path":"4/46-Permutations.py","file_name":"46-Permutations.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
{"seq_id":"26180219904","text":"from datetime import datetime, timedelta\n\nfrom flask import current_app as app\nfrom nessie.externals import canvas_data, redshift, s3\nfrom nessie.jobs.background_job import BackgroundJob, BackgroundJobError\nfrom nessie.lib import berkeley\nfrom nessie.lib.util import get_s3_canvas_daily_path\nimport pandas as 
pd\n\n\n\"\"\"Logic for generate canvas data catalog job.\"\"\"\n\n\nclass RefreshCanvasDataCatalog(BackgroundJob):\n\n def run(self):\n # Retrieve latest schema definitions from Canvas data API\n response = canvas_data.get_canvas_data_schema()\n external_schema = app.config['REDSHIFT_SCHEMA_CANVAS']\n redshift_iam_role = app.config['REDSHIFT_IAM_ROLE']\n canvas_schema = []\n\n # Parse and isolate table and column details\n for key, value in response['schema'].items():\n for column in value['columns']:\n # Not every column has description and length.\n description = None\n if 'description' in column:\n description = column['description']\n\n length = None\n if 'length' in column:\n length = column['length']\n\n canvas_schema.append([\n value['tableName'],\n column['name'],\n column['type'],\n description,\n length,\n ])\n # Create a dataframe\n schema_df = pd.DataFrame(canvas_schema)\n schema_df.columns = [\n 'table_name',\n 'column_name',\n 'column_type',\n 'column_description',\n 'column_length',\n ]\n\n # The schema definitions received from Canvas are Redshift compliant. We update\n # cetain column types to match Glue and Spectrum data types.\n schema_df['glue_type'] = schema_df['column_type'].replace({\n 'enum': 'varchar',\n 'guid': 'varchar',\n 'text': 'varchar(max)',\n 'date': 'timestamp',\n 'datetime': 'timestamp',\n })\n\n schema_df['transformed_column_name'] = schema_df['column_name'].replace({\n 'default': '\"default\"',\n 'percent': '\"percent\"',\n })\n # Create Hive compliant storage descriptors\n canvas_external_catalog_ddl = self.generate_external_catalog(external_schema, schema_df)\n\n # Clean up and recreate refreshed tables on Glue using Spectrum\n redshift.drop_external_schema(external_schema)\n redshift.create_external_schema(external_schema, redshift_iam_role)\n\n if redshift.execute_ddl_script(canvas_external_catalog_ddl):\n app.logger.info('Canvas schema creation job completed.')\n else:\n app.logger.error('Canvas schema creation job failed.')\n raise BackgroundJobError('Canvas schema creation job failed.')\n\n self.verify_external_data_catalog()\n return 'Canvas external schema created and verified.'\n\n def generate_external_catalog(self, external_schema, schema_df):\n canvas_path = self.generate_canvas_path()\n canvas_tables = schema_df.table_name.unique()\n s3_canvas_data_url = 's3://' + app.config['LOCH_S3_BUCKET'] + '/' + canvas_path\n s3_requests_url = 's3://{}/{}'.format(app.config['LOCH_S3_BUCKET'], berkeley.s3_canvas_data_path_current_term())\n external_table_ddl = ''\n\n for table in canvas_tables:\n table_columns = schema_df.loc[schema_df['table_name'] == table].reset_index()\n storage_descriptor_df = table_columns[['transformed_column_name', 'glue_type']]\n\n create_ddl = 'CREATE EXTERNAL TABLE {}.{}\\n(\\n'.format(external_schema, table)\n storage_descriptors = ''\n for index in storage_descriptor_df.index:\n storage_descriptors = '{} {} {}'.format(\n storage_descriptors,\n storage_descriptor_df['transformed_column_name'][index],\n storage_descriptor_df['glue_type'][index],\n )\n if (index != (len(storage_descriptor_df.index) - 1)):\n storage_descriptors = storage_descriptors + ',\\n'\n\n table_properties = '\\n) \\nROW FORMAT DELIMITED FIELDS \\nTERMINATED BY \\'\\t\\' \\nSTORED AS TEXTFILE'\n if (table != 'requests'):\n table_location = '\\nLOCATION \\'{}/{}\\''.format(s3_canvas_data_url, table)\n else:\n table_location = '\\nLOCATION \\'{}/{}\\''.format(s3_requests_url, table)\n\n external_table_ddl = '{}\\n{}{}{}{};\\n\\n'.format(\n 
external_table_ddl,\n create_ddl,\n storage_descriptors,\n table_properties,\n table_location,\n )\n\n # For debugging process, export to external_table_ddl to file to get a well formed SQL template for canvas-data\n return external_table_ddl\n\n # Gets an inventory of all the tables by tracking the S3 canvas-data daily location and run count verification to ensure migration was successful\n def verify_external_data_catalog(self):\n s3_client = s3.get_client()\n bucket = app.config['LOCH_S3_BUCKET']\n external_schema = app.config['REDSHIFT_SCHEMA_CANVAS']\n prefix = self.generate_canvas_path()\n app.logger.info(f'Daily path = {prefix}')\n directory_names = []\n s3_objects = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)\n for object_summary in s3_objects['Contents']:\n # parse table names from the S3 object URLs\n directory_names.append(object_summary['Key'].split('/')[3])\n\n # Get unique table names from S3 object list\n tables = sorted(list(set(directory_names)))\n # Ensure that all tables required by downstream jobs have data present in S3.\n required_tables = [\n 'assignment_dim',\n 'assignment_override_dim',\n 'assignment_override_user_rollup_fact',\n 'course_dim',\n 'course_score_fact',\n 'course_section_dim',\n 'enrollment_dim',\n 'enrollment_fact',\n 'enrollment_term_dim',\n 'pseudonym_dim',\n 'submission_dim',\n 'submission_fact',\n 'user_dim',\n ]\n for required_table in required_tables:\n if required_table not in tables:\n raise BackgroundJobError(f'No data in S3 for external table {required_table}: aborting job.')\n\n app.logger.info(f'Tables to be verified : {tables}')\n for table in tables:\n result = redshift.fetch(f'SELECT COUNT(*) FROM {external_schema}.{table}')\n if result and result[0] and result[0]['count']:\n count = result[0]['count']\n app.logger.info(f'Verified external table {table} ({count} rows).')\n else:\n raise BackgroundJobError(f'Failed to verify external table {table}: aborting job.')\n app.logger.info(f'Canvas verification job completed successfully for {len(tables)} tables')\n return True\n\n def generate_canvas_path(self):\n canvas_path = get_s3_canvas_daily_path()\n if not s3.get_keys_with_prefix(canvas_path):\n canvas_path = get_s3_canvas_daily_path(datetime.now() - timedelta(days=1))\n if not s3.get_keys_with_prefix(canvas_path):\n raise BackgroundJobError('No timely Canvas data found, aborting')\n else:\n app.logger.info('Falling back to yesterday\\'s Canvas data')\n return canvas_path\n","repo_name":"ets-berkeley-edu/nessie","sub_path":"nessie/jobs/refresh_canvas_data_catalog.py","file_name":"refresh_canvas_data_catalog.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"32765980130","text":"\n\ndef part1(file):\n total = 0\n with open(file) as f:\n while True:\n line = f.readline().strip()\n if not line:\n break\n left, right = line.split(\",\")\n ll, lr = left.split(\"-\")\n rl, rr = right.split(\"-\")\n if int(ll) >= int(rl) and int(lr) <= int(rr) or int(ll) <= int(rl) and int(lr) >= int(rr):\n total += 1\n\n return total\n\n\ndef part2(file):\n total = 0\n with open(file) as f:\n while True:\n line = f.readline().strip()\n if not line:\n break\n left, right = line.split(\",\")\n ll, lr = left.split(\"-\")\n rl, rr = right.split(\"-\")\n ll, lr, rl, rr = int(ll), int(lr), int(rl), int(rr)\n if rl <= lr <= rr \\\n or rl <= ll <= rr \\\n or ll >= rl and lr <= rr \\\n or ll <= rl and lr >= rr:\n total += 1\n\n return total\n\n\ndef 
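The DDL assembly in generate_external_catalog above boils down to joining (column, type) pairs into a CREATE EXTERNAL TABLE statement. A simplified sketch with a made-up table and schema:

import pandas as pd

# Hypothetical one-table schema mirroring the
# (transformed_column_name, glue_type) pairs built above.
schema = pd.DataFrame({
    'transformed_column_name': ['id', '"default"', 'name'],
    'glue_type': ['bigint', 'varchar', 'varchar(max)'],
})

columns_sql = ',\n'.join(
    f' {row.transformed_column_name} {row.glue_type}'
    for row in schema.itertuples()
)
ddl = (
    'CREATE EXTERNAL TABLE example_schema.example_table\n(\n'
    + columns_sql
    + "\n)\nROW FORMAT DELIMITED FIELDS\nTERMINATED BY '\\t'\nSTORED AS TEXTFILE"
    + "\nLOCATION 's3://example-bucket/canvas/example_table';"
)
print(ddl)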
main():\n print(\"the answer is \")\n print(part1(\"data/test_data\"))\n print(part1(\"data/real_data\"))\n print(part2(\"data/test_data\"))\n print(part2(\"data/real_data\"))\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"od107/aoc22","sub_path":"day4/pairs.py","file_name":"pairs.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11673071473","text":"import threading\nimport zmq\nimport time\nimport json\nimport sys\nimport os\nsys.path.append(\"..\")\n# from controller.block_chain import *\n\n\nfrom hashlib import sha256\nimport json\nimport time\n\n\nclass Block(object):\n\n def __init__(self, transactions, previous_hash):\n self.transactions = transactions\n # self.timestamp = timestamp\n self.previous_hash = previous_hash\n self.nonce = 0\n\n def compute_hash(self):\n # A function that return the hash of the block contents.\n block_string = str(self.transactions) + str(self.nonce)\n # block_string = json.dumps(self._dict_, sort_keys = True)\n return str(sha256(block_string.encode()).hexdigest())\n\n\nclass MessageThread(threading.Thread):\n\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n self.endpoint = \"tcp://127.0.0.1:5002\"\n\n def run(self): # 把要执行���代码写到run函数里面 线程在创建后会直接运行run函数\n context = zmq.Context()\n client = context.socket(zmq.SUB)\n client.connect(self.endpoint)\n client.setsockopt(zmq.SUBSCRIBE, b'') # Terminate early\n while True:\n print(\"5002 waiting...\")\n rep = client.recv_json()\n reply = json.loads(rep)\n print(\"5002 received: \", reply)\n block = Block(reply, previous_hash=0)\n hash_result, noce = self.proof_of_work(block=block)\n for key in reply.keys():\n reply[key]['noce'] = str(noce)\n reply[key]['hash_cur'] = str(hash_result)\n MessageThread.send_finish_status(reply)\n\n def proof_of_work(self, block):\n block.nonce = 0\n computed_hash = block.compute_hash()\n while not computed_hash.startswith('O' * 5):\n block.nonce += 1\n computed_hash = block.compute_hash()\n # print(computed_hash)\n if computed_hash.startswith('0' * 5):\n break\n print(' 最终结果:{}, 随机数:{}'.format(computed_hash, block.nonce))\n return computed_hash, block.nonce\n\n @staticmethod\n def send_finish_status(block_object):\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://127.0.0.1:5003\")\n block_dict = {}\n block_dict['finished'] = block_object\n block_dict['uid'] = LoginThread.uid\n block_string = json.dumps(block_dict)\n print(\"5003 send: \", block_string)\n socket.send_json(json.dumps(block_dict))\n socket.recv_string()\n socket.close()\n\n\nclass Client(threading.Thread):\n\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n self.endpoint = \"tcp://127.0.0.1:5004\"\n\n def run(self):\n context = zmq.Context()\n client = context.socket(zmq.SUB)\n client.connect(self.endpoint)\n client.setsockopt(zmq.SUBSCRIBE, b'')\n while True:\n data = client.recv_json()\n reply = json.loads(data)\n print(\"5004 received\", reply)\n for key in reply.keys():\n load_data = {}\n if os.path.exists('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt'):\n with open('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt', 'r', encoding='utf-8') as f:\n load_data = json.load(f)\n if key in load_data:\n continue\n else:\n load_data[key] 
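The chained comparisons in part1/part2 above can be stated as two small predicates, which makes the symmetry of the containment and overlap tests explicit; a sketch:

def fully_contains(a, b, c, d):
    # One closed range [a, b] contains the other [c, d], in either direction.
    return (a <= c and d <= b) or (c <= a and b <= d)

def overlaps(a, b, c, d):
    # Closed ranges [a, b] and [c, d] overlap iff neither ends before
    # the other begins.
    return a <= d and c <= b

assert fully_contains(2, 8, 3, 7)
assert overlaps(5, 7, 7, 9)
assert not overlaps(2, 4, 6, 8)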
= reply[key]\n print(\"write data: \", load_data[key])\n with open('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt', 'w', encoding=\"utf-8\") as f:\n f.write(json.dumps(load_data, indent=2, ensure_ascii=False))\n else:\n load_data[key] = reply[key]\n with open('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt', 'w', encoding=\"utf-8\") as f:\n f.write(json.dumps(load_data, indent=2, ensure_ascii=False))\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://127.0.0.1:5001\")\n block_dict = {'ok': 'true', 'uid': LoginThread.uid}\n block_string = json.dumps(block_dict)\n print(\"5003 send: \", block_string)\n socket.send_json(json.dumps(block_dict))\n socket.recv_string()\n socket.close()\n\n\nclass LoginThread(threading.Thread):\n uid = \"\"\n\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n self.endpoint = \"tcp://127.0.0.1:5006\"\n\n def login(self):\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://127.0.0.1:5005\")\n while True:\n data_dict = {}\n for filename in os.listdir(os.getcwd() + \"\\\\data\\\\block\"):\n with open(\".\\\\data\\\\block\\\\\" + filename, 'r', encoding='utf-8') as f:\n load_data = json.load(f)\n for key in load_data.keys():\n data_dict[load_data[key]['pid']] = load_data\n if data_dict:\n data_dict['uid'] = self.uid\n socket.send_json(json.dumps(data_dict))\n else:\n socket.send_json(json.dumps({'nothing': \"true\"}))\n rec = socket.recv_json()\n print(\"my received: \", rec)\n time.sleep(60)\n\n def run(self): # 把要执行的代码写到run函数里面 线程在创建后会直接运行run函数\n context = zmq.Context()\n client = context.socket(zmq.SUB)\n client.connect(self.endpoint)\n client.setsockopt(zmq.SUBSCRIBE, b'') # Terminate early\n while True:\n rep = client.recv_json()\n reply = json.loads(rep)\n print(\"5006 received: \", reply)\n for key in reply.keys():\n with open('.\\\\data\\\\block\\\\pid-' + key + '.txt', 'w', encoding=\"utf-8\") as f:\n f.write(json.dumps(reply[key], indent=2, ensure_ascii=False))\n client.close()\n\n\nif __name__ == \"__main__\":\n login = LoginThread(1, \"Thread-1\", 1)\n LoginThread.uid = input(\"your uid\")\n login_thread = threading.Thread(target=login.login)\n login_thread.start()\n login.start()\n handler2 = MessageThread(3, \"Thread-3\", 1)\n handler2.start()\n handler3 = Client(4, \"Thread-4\", 1)\n handler3.start()\n","repo_name":"kelekle/simple_block_chain","sub_path":"client/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"70109434490","text":"# Modelo\n# Controllar los datos\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nclass Person: \n def __init__(self, name, email):\n self.name = name \n self.email = email \n\nclass Student(Person):\n def __init__(self, name, last_name, phone, email, type_help):\n Person.__init__(self, name, email)\n self.last_name = last_name\n self.phone = phone\n self.type_help = type_help \n\nclass Voluntary(Person):\n def __init__(self, name, type_help, email, phone, courses):\n Person.__init__(self, name, email)\n self.courses = courses\n self.phone = phone\n self.type_help = type_help\n\nclass Donation(Person):\n def __init__(self, name, email, donated_money, others_donated):\n Person.__init__(self, name, email)\n self.donated_money = donated_money\n self.others_donated = 
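The mining loop above is inconsistent: its while-condition tests startswith('O' * 5) with the capital letter O, while the body tests '0' * 5 with the digit zero, so the loop only ever terminates through the inner break. A consistent proof-of-work sketch over sha256:

from hashlib import sha256

def proof_of_work(payload, difficulty=5):
    # Increment the nonce until the hex digest starts with `difficulty`
    # zero digits ('0', not the letter 'O').
    prefix = '0' * difficulty
    nonce = 0
    while True:
        digest = sha256(f'{payload}{nonce}'.encode()).hexdigest()
        if digest.startswith(prefix):
            return digest, nonce
        nonce += 1

# proof_of_work('hello', difficulty=4) finds a match in a few thousand
# iterations; each extra zero of difficulty costs roughly a factor of 16.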
others_donated\n \nclass Model:\n FILE_STUDENTS = \"docs//students.csv\"\n FILE_VOLUNNTEERING = \"docs//volunteering.csv\"\n FILE_INSTITUTIONS = \"docs//institutions.csv\"\n FILE_DONATIONS = \"docs//donations.csv\"\n FILE_INFORMATION = \"docs//information.csv\"\n\n def __init__(self):\n self.df_students = pd.read_csv(self.FILE_STUDENTS, encoding=\"latin1\")\n self.df_volunnteering = pd.read_csv(self.FILE_VOLUNNTEERING, encoding=\"latin1\")\n self.df_donations = pd.read_csv(self.FILE_DONATIONS, encoding=\"latin1\")\n self.df_institutions = pd.read_csv(self.FILE_INSTITUTIONS, encoding=\"latin1\")\n self.df_information = pd.read_csv(self.FILE_INFORMATION, encoding=\"latin1\")\n \n # --> Métodos para agregar datos nuevos a las \"bases de datos\"\n def add_new_student(self, student : Student):\n new_registry = {\n \"nombre\" : student.name,\n \"apellido\" : student.last_name,\n \"correo\" : student.email,\n \"numero\" : student.phone,\n \"tipo_ayuda_buscada\" : student.type_help\n }\n\n self.df_students = self.df_students.append(new_registry, ignore_index=True)\n self.df_students.to_csv(self.FILE_STUDENTS, index=False)\n return \"Información guardada con éxito\"\n\n def add_new_voluntary(self, voluntary : Voluntary):\n new_registry = {\n \"nombre_voluntariado\" : voluntary.name,\n \"tipo_ayuda\" : voluntary.phone,\n \"correo\" : voluntary.email,\n \"numero_telefonico\" : voluntary.phone,\n \"cuantcursos\" : voluntary.courses\n }\n\n self.df_volunnteering = self.df_volunnteering.append(new_registry, ignore_index=True)\n self.df_volunnteering.to_csv(self.FILE_VOLUNNTEERING, index=False)\n return \"Información guardada con éxito\"\n\n def add_new_donnation(self, donation : Donation):\n new_registry = {\n \"name\" : donation.name,\n \"email\" : donation.email,\n \"donated_money\" : donation.donated_money,\n \"others_donated\" : donation.others_donated,\n }\n\n self.df_donations = self.df_donations.append(new_registry, ignore_index=True)\n self.df_donations.to_csv(self.FILE_DONATIONS, index=False)\n return \"Información guardada con éxito\"\n\n def add_count_visite(self):\n self.df_information[\"veces_estudiante_interaccion\"][0] += 1\n self.df_information.to_csv(self.FILE_INFORMATION, index=False)\n\n # --> Métodos para llevar mostrar información en específica\n def show_recomendation_complete(self, type_help):\n total_elements = self.df_institutions.shape[0]\n for i in range(total_elements):\n if self.df_institutions[\"tipobeca\"][i] == type_help or self.df_institutions[\"tipobeca\"][i] == \"Mixto\":\n print(f\"-> {self.df_institutions['nombre'][i]} - Descripcion:\\n{self.df_institutions['detalles'][i]}\\n\")\n\n def show_other_recomendations(self):\n total_elements = self.df_institutions.shape[0]\n for i in range(total_elements):\n if self.df_institutions[\"tipobeca\"][i] != \"Completa\" or self.df_institutions[\"tipobeca\"][i] != \"Parcial\":\n print(f\"-> {self.df_institutions['nombre'][i]} - Descripcion:\\n{self.df_institutions['detalles'][i]}\\n\")\n\n # --> Métodos para llevar a cabo el análisis de datos\n def get_information_general(self):\n info = f\"\"\"\n Estudiantes que ingresaron a la pagina: {self.df_information[\"veces_estudiante_interaccion\"][0]}\n Cantidad de estudiantes registrados: {self.df_students.shape[0]} \n \"\"\"\n return info\n\n def get_donations_recived(self):\n info = f\"\" \n \n money = 0\n elements = self.df_donations.shape[0]\n # Mostrar dinero donado\n for i in range(elements): money += int(self.df_donations[\"donated_money\"][i])\n info += f\"\\nDinero donado en 
total: {money}\\n\"\n\n # Mostrar elementos donados\n info += f\"\\nElementos donados:\\n\"\n for i in range(elements): \n donated_elems = self.df_donations[\"others_donated\"][i].split(\"+\")\n for elem in donated_elems: \n if not len(elem) == 0:\n info += f\"-> {elem}\\n\"\n\n return info\n\n def show_grafic(self, type):\n prueba = {\"Cantidad que entraron a la página\" : self.df_information[\"veces_estudiante_interaccion\"][0],\n \"Estudiantes registrados\" : self.df_students.shape[0]} if type == 1 else {\"Estudiantes\" : self.df_students.shape[0],\n \"Voluntareados\" : self.df_volunnteering.shape[0],\n \"Instituciones\" : self.df_institutions.shape[0]}\n\n dates = list(prueba.keys())\n values = list(prueba.values())\n\n plt.bar(dates, values, color = \"red\", width = 0.4)\n\n plt.title(\"Estudiantes ingresados\")\n plt.ylabel(\"Cantidad Control\")\n plt.xlabel(\"Tipo de interacción\")\n plt.show() \n","repo_name":"CrisLayB/ProyectoHIODS","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15703393562","text":"import numpy as np\n\nn = 2\nN = 10\n\nxes = np.random.random_sample((N, n)) # N массивов по n в длину\nyes = np.random.random_sample((N,)) # Один длинный массив длины N\n\n\nprint(xes)\nprint(\"xes\")\nprint(yes)\nprint(\"yes\")\ny_mean = np.sum(yes) / N # средний от всех y \n\n# x1_mean = np.sum(xes.transpose()[0]) / N\n# x2_mean = np.sum(xes.transpose()[1]) / N\n\n# 1 относится к тому, что массив получится одномерный\nx_1_means = np.array(\n [sum(xes.transpose()[i]) / N for i in range(n)]) # считаем среднее от всех координат x по отдельности. \n\nxy_1_means = np.array(\n [np.sum(yes * xes.transpose()[i_s]) / N for i_s in range(n)]) # \n\n# 2 относится к тому, что массив получится двумерный. 
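DataFrame.append, used by every add_new_* method above, was deprecated in pandas 1.4 and removed in pandas 2.0. The same register-and-save pattern with pd.concat, as a sketch:

import pandas as pd

df_students = pd.DataFrame(columns=['nombre', 'apellido', 'correo'])

new_registry = {'nombre': 'Ana', 'apellido': 'Pérez', 'correo': 'ana@example.com'}
df_students = pd.concat(
    [df_students, pd.DataFrame([new_registry])],
    ignore_index=True,
)
# df_students.to_csv('docs/students.csv', index=False) would then persist
# the updated table exactly as the methods above do.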
Получается матричка размера n на n.\nx_2_means = np.array([np.array([\n np.sum(xes.transpose()[i] * xes.transpose()[i_s]) / N\n for i in range(n)]) for i_s in range(n)]) # \n\n\ndef matrix_function(i, i_s):\n if i_s == 0:\n # мы попали в самое первое уравнение\n if i == 0:\n return 1\n else:\n return x_1_means[i - 1]\n else:\n # мы попали в более сложный случай\n if i == 0:\n return x_1_means[i_s - 1]\n else:\n return x_2_means[i_s - 1][i - 1]\n\n\ndef right_part_function(i_s):\n if i_s == 0:\n return y_mean\n else:\n return xy_1_means[i_s - 1]\n\n# n+1 поскольку у нас 1ое уравнение есть и все остальные, которых n\n\n# для нахождения a_0 и a_i нужно будет обращать main_matrix\nmain_matrix = np.array([np.array([matrix_function(i, i_s) for i in range(n + 1)]) for i_s in range(n + 1)])\n# напишем же правую часть уравнения\nright_part = np.array([right_part_function(i_s) for i_s in range(n + 1)])\n# Это строчка вида [a_0, a_1, a_2, ...]\nour_result = np.linalg.solve(main_matrix, right_part)\nprint(our_result)","repo_name":"AlexandraKalinina/vkr","sub_path":"liner.py","file_name":"liner.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33427935330","text":"import os\nimport sys\nsys.path.append(\".\")\n\nimport torch\nfrom torch.utils.data import DataLoader, random_split\n\nfrom ImageAestheticsGANs.AADB.AADB import AADB_binaries\nfrom tqdm import tqdm\nfrom ImageAestheticsGANs.models.ResNet18 import RegressionNetwork\nimport torch.nn as nn\nfrom ImageAestheticsGANs.loss_functions.focal_loss import FocalLoss\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Arguments for training loop\")\nparser.add_argument('--batch_size', type=int, help=\"Number of batches\")\nparser.add_argument('--epochs', type=int, default=200, help=\"Number of epochs\")\nparser.add_argument('--image_size', type=int,default=64, help=\"Image dimensions\")\nparser.add_argument('--load', type=bool, default=False, help=\"Loading model?\")\nparser.add_argument('--lr', type=float, default=0.0002, help=\"Learning rate\")\nparser.add_argument('--ckpt', type=str, help=\"Checkpoint for loading\")\nparser.add_argument('--beta', type=float, default=0.5, help=\"Beta for Adam optimizer\")\nparser.add_argument('--optim', type=str, default='sgd', help=\"Optimizer for the algorithm (adam/sgd)\")\nparser.add_argument('--criterion', type=str, default='bcelogits', help=\"Loss function (cross/bcelogits/focal)\")\nparser.add_argument('--results', type=str, help=\"Results folder\")\nargs = parser.parse_args()\n\nbatch_size = args.batch_size\nepochs = args.epochs\nload = args.load\nckpt = args.ckpt\nlr = args.lr\nbeta = args.beta\nimage_size = args.image_size\n\ndata_path = 'F:\\Projects\\Disertatie\\ImageAestheticsGANs\\AADB'\n\naadb = AADB_binaries(data_path, image_size)\naadb_test = AADB_binaries(data_path, image_size, test=True)\nn_classes = aadb.get_num_classes()\n\nval_size = 500\ntrain_size = len(aadb) - val_size\n\ntrain_ds, val_ds = random_split(aadb, [train_size, val_size])\ntrain_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\nvalid_dl = DataLoader(val_ds, batch_size, num_workers=0, pin_memory=True)\n\n\ndef get_default_device():\n '''Pick GPU if available'''\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')\n\n\ndef to_device(data, device):\n '''Move tensors to chosen device'''\n if isinstance(data, (list, 
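The hand-assembled normal equations above can be cross-checked against numpy's least-squares solver: prepending a column of ones reproduces the intercept a_0, and the 1/N averaging cancels on both sides of the system. A sketch:

import numpy as np

N, n = 10, 2
xes = np.random.random_sample((N, n))
yes = np.random.random_sample((N,))

# Design matrix [1 | x], so the first coefficient is the intercept a_0.
design = np.hstack([np.ones((N, 1)), xes])
coeffs, *_ = np.linalg.lstsq(design, yes, rcond=None)
# coeffs is [a_0, a_1, a_2] and should match
# np.linalg.solve(main_matrix, right_part) up to floating-point error.
print(coeffs)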
tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True).to(torch.float32)\n\n\nclass DeviceDataLoader():\n def __init__(self, dl, device):\n self.dl = dl\n self.device = device\n\n def __iter__(self):\n for b in self.dl:\n yield to_device(b, self.device)\n\n def __len__(self):\n return len(self.dl)\n\n\ndevice = get_default_device()\ntrain_dl = DeviceDataLoader(train_dl, device)\nvalid_dl = DeviceDataLoader(valid_dl, device)\n\nif args.criterion == \"bcelogits\":\n criterion = nn.BCEWithLogitsLoss()\nelif args.criterion == \"focal\":\n criterion = FocalLoss()\nelif args.criterion == \"cross\":\n criterion = nn.CrossEntropyLoss()\n\nmodel = RegressionNetwork(backbone='resnet18', num_attributes=n_classes, pretrained=True)\nmodel = model.to('cuda')\n\nif args.optim == 'adam':\n opt = torch.optim.Adam(model.parameters(), lr=lr, betas=(beta, 0.999))\nelif args.optim == 'sgd':\n opt = torch.optim.SGD(params=model.parameters(), lr=lr, momentum=0.9)\n\nif load:\n print(\"Loading checkpoint...\")\n\n checkpoint = torch.load(ckpt)\n last_epoch = checkpoint['epoch'] + 1\n\n train_losses = checkpoint['train_losses']\n val_losses = checkpoint['val_losses']\n\n loss = train_losses[-1]\n\n model.load_state_dict(checkpoint['model'])\n opt.load_state_dict(checkpoint['optimizer'])\n model.eval()\n\nelse:\n last_epoch = 0\n\n train_losses = []\n val_losses = []\n\nfor epoch in range(last_epoch, epochs):\n\n # Training Phase\n model.train()\n\n pbar = tqdm(enumerate(train_dl), total=len(train_dl))\n for batch, (images, labels) in pbar:\n opt.zero_grad()\n\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n\n # predicted = outputs.detach() > 0.5\n\n # correct = (predicted == labels.type(torch.uint8))\n\n # accuracy = correct.sum().item() / (len(correct) * n_classes)\n\n opt.step()\n\n # pbar.set_description(\"Epoch {}, Loss: {:.4f}, Accuracy: {:.4f}\".format(\n # epoch, float(loss), float(accuracy)))\n pbar.set_description(\"Epoch {}, Loss: {:.4f}\".format(\n epoch, float(loss)))\n train_losses.append(loss)\n\n # Evaluation Phase\n model.eval()\n correct = 0\n total = 0\n pbar = tqdm(enumerate(valid_dl), total=len(valid_dl))\n for batch, (images, labels) in pbar:\n with torch.no_grad():\n outputs = model(images)\n val_loss = loss = criterion(outputs, labels)\n\n predicted = outputs > 0.5\n\n correct += (predicted == labels.type(torch.uint8)).sum().item()\n total += len(labels) * n_classes\n\n accuracy = correct / total\n val_losses.append(val_loss)\n print('Accuracy of all test images: %.3f' % (accuracy * 100))\n if epoch % 10 == 0:\n filename = \"{}_epoch_{}_accuracy_{:.4f}_.pt\".format('AADB', epoch, accuracy)\n torch.save({'epoch': epoch,\n 'model': model.state_dict(),\n 'optimizer': opt.state_dict(),\n 'train_losses': train_losses,\n 'val_losses': val_losses\n }, os.path.join(args.results, filename))\n load = False\n","repo_name":"PetreBogdan/ImageAestheticGANs","sub_path":"ImageAestheticsGANs/resnet18_train.py","file_name":"resnet18_train.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35067457979","text":"import inspect\nimport tempfile\nimport os.path\nimport urllib.request\nimport textwrap\nfrom collections import namedtuple\nimport random\nimport numpy as np\nimport uuid\nfrom typing import List\nimport yaml\nimport re\nimport os\nimport sys\n\nSPLITS = ('train', 'val', 'test')\n\n\nclass VergeMLError(Exception):\n\n def 
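One detail worth noting in the evaluation loop above: with nn.BCEWithLogitsLoss the network emits raw logits, so thresholding with `outputs > 0.5` compares logits rather than probabilities. Probability 0.5 corresponds to logit 0, so the usual decision rule thresholds the logit at zero; a sketch of the equivalence:

import torch

logits = torch.tensor([[-0.2, 1.3], [0.4, -2.0]])

# Multi-label decision rule: sigmoid(logit) > 0.5  <=>  logit > 0.
assert torch.equal(torch.sigmoid(logits) > 0.5, logits > 0)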
__init__(self, message, suggestion=None, help_topic=None, hint_type=None, hint_key=None):\n super().__init__(message)\n self.suggestion = suggestion\n self.message = message\n self.hint_type = hint_type\n self.hint_key = hint_key\n self.help_topic = help_topic\n \n def __str__(self):\n if self.suggestion:\n if len(self.message + self.suggestion) < 80:\n return self.message + \" \" + self.suggestion\n else:\n return self.message + \"\\n\" + self.suggestion\n else:\n return self.message\n\n\ndef wrap_text(text):\n # TODO check terminal width\n res = []\n for para in text.split(\"\\n\\n\"):\n if para.splitlines()[0].strip().endswith(\":\"):\n res.append(para)\n else:\n res.append(textwrap.fill(para, drop_whitespace=True, fix_sentence_endings=True))\n return \"\\n\\n\".join(res)\n\n\ndef print_text(text):\n print(wrap_text(text))\n\n\n_Intro = namedtuple('_Intro', ['args', 'defaults', 'types'])\n\n\ndef introspect(call):\n spec = inspect.getfullargspec(call)\n args = spec.args\n defaults = dict(zip(reversed(spec.args), reversed(spec.defaults or [])))\n types = spec.annotations\n return _Intro(args, defaults, types)\n\n\n# taken from here: https://www.python-course.eu/levenshtein_distance.php\ndef _iterative_levenshtein(s, t):\n \"\"\" \n iterative_levenshtein(s, t) -> ldist\n ldist is the Levenshtein distance between the strings \n s and t.\n For all i and j, dist[i,j] will contain the Levenshtein \n distance between the first i characters of s and the \n first j characters of t\n \"\"\"\n rows = len(s)+1\n cols = len(t)+1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings \n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n \n for col in range(1, cols):\n for row in range(1, rows):\n if s[row-1] == t[col-1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row-1][col] + 1, # deletion\n dist[row][col-1] + 1, # insertion\n dist[row-1][col-1] + cost) # substitution\n \n return dist[row][col]\n\ndef did_you_mean(candidates, value, fmt=\"'{}'\"):\n candidates = list(candidates)\n names = list(sorted(map(lambda n: (_iterative_levenshtein(value, n), n), candidates)))\n names = list(filter(lambda dn: dn[0] <= 2, names))\n return 'Did you mean ' + fmt.format(names[0][1]) + '?' 
if names else None\n\n\ndef dict_set_path(d, path, value):\n c = d\n path = path.split(\".\")\n for key in path[:-1]:\n c = c.setdefault( key, {} )\n c[path[-1]] = value\n\ndef dict_del_path(d, path):\n if isinstance(path, str):\n path = path.split(\".\")\n if len(path) == 1:\n del[d[path[0]]]\n else:\n p, *rest = path\n dict_del_path(d[p], rest)\n if not d[p]:\n del d[p]\n\ndef dict_has_path(d, path):\n c = d\n for p in path.split(\".\"):\n if isinstance(c, dict) and p in c:\n c = c[p]\n else:\n return False\n return True\n\n_DEFAULT = object()\ndef dict_get_path(d, path, default=_DEFAULT):\n c = d\n for p in path.split(\".\"):\n if p in c:\n c = c[p]\n elif default != _DEFAULT:\n return default\n else:\n raise KeyError(path)\n return c\n\ndef dict_merge(dict1, dict2):\n if not isinstance(dict1, dict) or not isinstance(dict2, dict):\n return dict2\n for k in dict2:\n if k in dict1:\n dict1[k] = dict_merge(dict1[k], dict2[k])\n else:\n dict1[k] = dict2[k]\n return dict1\n\ndef dict_paths(d, path=None):\n res = []\n if path:\n if not dict_has_path(d, path):\n return res\n value = dict_get_path(d, path)\n else:\n value = d\n if not isinstance(d, dict):\n return res\n def _collect_path(d, path):\n for k, v in d.items():\n npath = f\"{path}.{k}\" if path is not None else k\n if isinstance(v, dict):\n _collect_path(v, npath)\n else:\n res.append(npath)\n _collect_path(value, path)\n return res\n\n\ndef parse_ai_names(argv):\n names = []\n for part in argv:\n if re.match(\"^@[a-zA-Z0-9_-]+$\", part):\n names.append(part[1:])\n else:\n break\n rest = argv[len(names):]\n return names, rest\n\ndef parse_split(value):\n \"\"\"Decodes the split value.\n \n Returns a tuple (type, value) where type is either perc, num or dir set.\n \"\"\"\n assert isinstance(value, (int, str))\n\n if isinstance(value, int):\n return ('num', value)\n elif value.endswith(\"%\"):\n return ('perc', float(value.rstrip(\"%\").strip()))\n elif value.isdigit():\n return ('num', int(value))\n else:\n return ('dir', value)\n\ndef format_info_text(text, indent=0, width=70):\n text = text.strip(\"\\n\")\n res = []\n for line in text.splitlines():\n if line.startswith(\" \"):\n res.append(line)\n elif line.strip() == \"\":\n res.append(line)\n else:\n res.extend(textwrap.wrap(line, width=width-indent))\n if indent:\n indstr = str(' ' * indent)\n res = list(map(lambda l: indstr + l, res))\n return \"\\n\".join(res)\n","repo_name":"tspannhw/vergeml","sub_path":"vergeml/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"35515066025","text":"\"\"\"\nThis example uses the light sensor on the CPB, located net to the picture of the eye on the board.\nOnce you have the library loaded, try shining a flashlight on your CP to watch the number of\nNeoPixels lit up increase, or try covering up the light sensor to watch the number decrease.\n\"\"\"\nimport time\nfrom adafruit_circuitplayground import cp\n\ncp.pixels.auto_write = False\ncp.pixels.brightness = 0.3\n\n\ndef scale_range(value):\n \"\"\"Scale a value from 0-320 (light range) to 0-9 (NeoPixel range, 10 total LEDs).\n Allows remapping light value to pixel position for light meter demo.\"\"\"\n return round(value / 320 * 10)\n\n\nwhile True:\n # light value remapped to pixel position\n peak = scale_range(cp.light)\n print(cp.light)\n print(int(peak))\n\n for i in range(0, 10, 1):\n if i <= peak:\n cp.pixels[i] = (0, 255, 255)\n else:\n cp.pixels[i] = (0, 0, 0)\n 
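A usage sketch for the helpers above, assuming this module's definitions are in scope: did_you_mean only suggests candidates within Levenshtein distance 2 of the input.

candidates = ['train', 'predict', 'evaluate']
print(did_you_mean(candidates, 'trian'))  # "Did you mean 'train'?" (distance 2)
print(did_you_mean(candidates, 'xyzzy'))  # None: nothing within distance 2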
cp.pixels.show()\n time.sleep(0.05)\n","repo_name":"kattni/PyCon2023","sub_path":"Circuit_Playground_Bluefruit/Circuit_Playground_Bluefruit_cp_Library_Examples/cp_light_neopixels.py","file_name":"cp_light_neopixels.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"43052922104","text":"import sys\nimport time\nfrom mongoengine import *\nimport datetime as dt\nimport json\nfrom flask_cors import CORS\nfrom flask_restful import Resource, Api, reqparse\nfrom flask import Flask\nsys.path.append(\"..\")\n\nimport utils.credentials\nfrom database.Request import Request as dbRequest\nfrom database.Station import Station as dbStation\nfrom database.Proposition import Proposition as dbProposition\nfrom database.TrainRecord import TrainRecord as dbTrainRecord\n\n\napp = Flask(__name__)\napi = Api(app)\nCORS(app)\n\n\n# Requests\n# shows a list of requests and lets you POST to add new requests in the database\nclass Requests(Resource):\n\n @staticmethod\n def get():\n \"\"\"\n Get the list of all the requests registered in the database.\n :return: A list of JSON each containing a request.\n \"\"\"\n start = time.time()\n db_requests = dbRequest.objects\n requests = json.loads(db_requests.to_json())\n for k in range(len(requests)):\n requests[k]['date'] = str((db_requests[k]['date']))\n requests[k]['destination'] = db_requests[k]['destination'].name\n requests[k]['origin'] = db_requests[k]['origin'].name\n end = time.time()\n print(\"GET /requests took \"+str(end-start)+\" s\")\n return requests, 200\n\n @staticmethod\n def post():\n \"\"\"\n Add a NEW request that will be registered in the database.\n :param: Arguments of the POST request.\n origin: Code of the origin station (i.e. FRADI) - Required\n destination: Code of the destination station (i.e. 
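scale_range above maps the sensor's 0-320 light reading onto the 10-pixel ring; worked examples of the arithmetic:

def scale_range(value):
    return round(value / 320 * 10)

assert scale_range(160) == 5   # half the light range lights half the ring
assert scale_range(320) == 10  # a saturated reading exceeds the last index
                               # (9), so every pixel satisfies i <= peak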
FRAFJ) - Required\n date: Date of the first request with the format %Y-%m-%d %H:%M:%S - Required\n gapTime: Number of days you want to execute the request - Default is 0\n :return: A JSON file of the request newly registered.\n \"\"\"\n\n # requests parser\n requests_parser = reqparse.RequestParser()\n requests_parser.add_argument(\n name='origin', type=str, required=True, help=\"The code of the origin station\")\n requests_parser.add_argument(name='destination', type=str, required=True,\n help=\"The code of the destination station\")\n requests_parser.add_argument(name='date', type=str, required=True,\n help=\"The date of the first request; format : '%Y-%m-%d %H:%M:%S'\")\n requests_parser.add_argument(name='gapTime', default=0, type=int,\n help=\"The number of days you want to execute the request; 0 just for once\")\n requests_args = requests_parser.parse_args()\n\n # gets the corresponding Station object\n origin_station = dbStation().get_station_by_code(\n requests_args['origin'])\n destination_station = dbStation().get_station_by_code(\n requests_args['destination'])\n\n if requests_args['gapTime'] == 0:\n unique_date = True\n else:\n unique_date = False\n\n # checks if the request already exists in the database\n request_exist = dbRequest.objects(origin=origin_station, destination=destination_station,\n uniqueDate=unique_date,\n date=dt.datetime.strptime(\n requests_args['date'], \"%Y-%m-%d %H:%M:%S\"),\n gapTime=requests_args['gapTime']).first() is not None\n\n if request_exist:\n request_id = dbRequest.objects(origin=origin_station,\n destination=destination_station,\n uniqueDate=unique_date,\n date=dt.datetime.strptime(\n requests_args['date'], \"%Y-%m-%d %H:%M:%S\"),\n gapTime=requests_args['gapTime']).first().id\n return \"The request already exists at id {}\".format(request_id), 208\n else:\n request = dbRequest(origin=origin_station,\n destination=destination_station,\n uniqueDate=unique_date,\n date=dt.datetime.strptime(\n requests_args['date'], \"%Y-%m-%d %H:%M:%S\"),\n gapTime=requests_args['gapTime'])\n request.save()\n\n request = json.loads(request.to_json())\n request['date'] = requests_args['date']\n request['destination'] = destination_station.name\n request['origin'] = origin_station.name\n return request, 201\n\n\napi.add_resource(Requests, '/requests')\n\n\n# Request\n# shows a single request item and lets you PUT or DELETE a request item in the database\nclass Request(Resource):\n\n @staticmethod\n def get(request_id):\n \"\"\"\n Get a single request registered in the database.\n :param: request_id: Id of the request to get\n :return: A JSON file of the request.\n \"\"\"\n if dbRequest.objects(id=request_id).first() is not None:\n db_request = dbRequest.objects(id=request_id).first()\n request = json.loads(db_request.to_json())\n request['date'] = str((db_request['date']))\n request['destination'] = db_request['destination'].name\n request['origin'] = db_request['origin'].name\n return request, 200\n else:\n return \"Request not found at this id {}\".format(request_id), 404\n\n @staticmethod\n def delete(request_id):\n \"\"\"\n Delete from the database a single request registered in the database.\n :param: request_id: Id of the request to delete\n :return: 204.\n \"\"\"\n dbRequest.objects(id=request_id).delete()\n return \"\", 204\n\n @staticmethod\n def put(request_id):\n \"\"\"\n Update a request that is registered in the database.\n :param: Arguments of the PUT request.\n origin: Code of the origin station (i.e. 
FRADI) - Default is the request's one\n destination: Code of the destination station (i.e. FRAFJ) - Default is the request's one\n date: Date of the first request with the format %Y-%m-%d %H:%M:%S - Default is the request's one\n gapTime: Number of days you want to execute the request - Default is the request's one\n :return: A JSON file of the request newly updated.\n \"\"\"\n\n # request parser\n request_parser = reqparse.RequestParser()\n request_parser.add_argument(\n name='origin', type=str, help=\"The code of the origin station\")\n request_parser.add_argument(\n name='destination', type=str, help=\"The code of the destination station\")\n request_parser.add_argument(name='date', type=str,\n help=\"The date of the first request; format : '%Y-%m-%d %H:%M:%S'\")\n request_parser.add_argument(name='gapTime', type=int,\n help=\"The number of days you want to execute the request; 0 just for once\")\n request_args = request_parser.parse_args()\n\n request = dbRequest.objects(id=request_id).first()\n\n # get default values\n if request_args['origin'] is None:\n request_args['origin'] = request.origin.code\n if request_args['destination'] is None:\n request_args['destination'] = request.destination.code\n if request_args['date'] is None:\n request_args['date'] = dt.datetime.strftime(\n request.date, \"%Y-%m-%d %H:%M:%S\")\n if request_args['gapTime'] is None:\n request_args['gapTime'] = request.gapTime\n\n if request_args['gapTime'] == 0:\n unique_date = True\n else:\n unique_date = False\n\n origin_station = dbStation().get_station_by_code(\n request_args['origin'])\n destination_station = dbStation().get_station_by_code(\n request_args['destination'])\n\n dbRequest.objects(id=request_id).update_one(set__origin=origin_station,\n set__destination=destination_station, set__uniqueDate=unique_date,\n set__date=dt.datetime.strptime(request_args['date'],\n \"%Y-%m-%d %H:%M:%S\"),\n set__gapTime=request_args['gapTime'])\n\n db_request = dbRequest.objects(id=request_id).first()\n request = json.loads(db_request.to_json())\n request['date'] = str((db_request['date']))\n request['destination'] = db_request['destination'].name\n request['origin'] = db_request['origin'].name\n return request, 200\n\n\napi.add_resource(Request, '/requests/')\n\n\n# Stations\n# shows a list of stations and lets you POST to add new stations in the database\nclass Stations(Resource):\n @staticmethod\n def get():\n \"\"\"\n Get the list of all the stations registered in the database.\n :return: A list of JSON each containing a station.\n \"\"\"\n\n # stations parser\n stations_parser = reqparse.RequestParser()\n stations_parser.add_argument(\n name='name', type=str, help=\"The name of the station\")\n stations_args = stations_parser.parse_args()\n\n start = time.time()\n if stations_args['name'] is not None:\n stations = json.loads(dbStation.search_station(stations_args['name']).order_by(\"name\").to_json())\n else:\n stations = json.loads(dbStation.objects.order_by(\"name\").to_json())\n end = time.time()\n print(\"GET /stations took \"+str(end-start)+\" s\")\n return stations, 200\n\n @staticmethod\n def post():\n \"\"\"\n Add a NEW station that will be registered in the database.\n :param: Arguments of the POST request.\n code: Code of the station (i.e. 
FRAFJ) - Required\n name: Name of the station - Required\n :return: A JSON file of the station newly registered.\n \"\"\"\n\n # stations parser\n stations_parser = reqparse.RequestParser()\n stations_parser.add_argument(\n name='code', type=str, required=True, help=\"The code of the station (i.e. FRAFJ)\")\n stations_parser.add_argument(\n name='name', type=str, required=True, help=\"The name of the station\")\n stations_args = stations_parser.parse_args()\n\n # checks if the station already exists in the database\n station_exist = dbStation.objects(\n code=stations_args['code'], name=stations_args['name']).first() is not None\n\n if station_exist:\n station_id = dbStation.objects(\n code=stations_args['code'], name=stations_args['name']).first().id\n return \"The station already exists at id {}\".format(station_id), 208\n else:\n station = dbStation(\n code=stations_args['code'], name=stations_args['name'])\n station.save()\n return json.loads(station.to_json()), 201\n\n\napi.add_resource(Stations, '/stations')\n\n\n# Station\n# shows a single station item and lets you PUT or DELETE a station item in the database\nclass Station(Resource):\n\n @staticmethod\n def get(station_id):\n \"\"\"\n Get a single station registered in the database.\n :param: station_id: Id of the station to get\n :return: A JSON file of the station.\n \"\"\"\n if dbStation.objects(id=station_id).first() is not None:\n return json.loads(dbStation.objects(id=station_id).first().to_json()), 200\n else:\n return \"Station not found at this id {}\".format(station_id), 404\n\n @staticmethod\n def delete(station_id):\n \"\"\"\n Delete from the database a single station registered in the database.\n :param: request_id: Id of the station to delete\n :return: 204.\n \"\"\"\n dbStation.objects(id=station_id).delete()\n return \"\", 204\n\n @staticmethod\n def put(station_id):\n \"\"\"\n Update a station that is registered in the database.\n :param: code: Code of the origin station (i.e. FRADI) - Default is the station's one\n name: Code of the destination station (i.e. FRAFJ) - Default is the station's one\n :return: A JSON file of the station newly updated.\n \"\"\"\n\n # station parser\n station_parser = reqparse.RequestParser()\n station_parser.add_argument(\n name='code', type=str, help=\"The code of the station (i.e. 
FRAFJ)\")\n station_parser.add_argument(\n name='name', type=str, help=\"The name of the station\")\n station_args = station_parser.parse_args()\n\n station = dbStation.objects(id=station_id).first()\n\n # get default values\n if station_args['code'] is None:\n station_args['code'] = station.code\n if station_args['name'] is None:\n station_args['name'] = station.name\n\n dbStation.objects(id=station_id).update_one(\n set__code=station_args['code'], set__name=station_args['name'])\n return json.loads(dbStation.objects(id=station_id).first().to_json()), 200\n\n\napi.add_resource(Station, '/stations/')\n\n\n# Propositions\n# shows a list of propositions and lets you POST to add new propositions in the database\nclass Propositions(Resource):\n @staticmethod\n def get():\n \"\"\"\n Get the list of all the propositions registered in the database.\n :return: A list of JSON each containing a proposition.\n \"\"\"\n return json.loads(dbProposition.objects.to_json()), 200\n\n # @staticmethod\n # def post():\n # \"\"\"\n # Add a NEW proposition that will be registered in the database.\n # :param: Arguments of the POST request.\n # amount: Price of the proposition - Required\n # remainingSeat: Number of remaining seats for the proposition - Required\n # :return: A JSON file of the proposition newly registered.\n # \"\"\"\n\n # # propositions parser\n # propositions_parser = reqparse.RequestParser()\n # propositions_parser.add_argument(name='amount', type=int, required=True, help=\"The price of the proposition\")\n # propositions_parser.add_argument(name='remainingSeat', type=float, required=True,\n # help=\"The number of remaining seats for the proposition\")\n # propositions_args = propositions_parser.parse_args()\n\n # # checks if the proposition already exists in the database\n # proposition_exist = dbProposition.objects(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat']).first() is not None\n # print(proposition_exist, dbProposition.objects(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat']))\n # if proposition_exist:\n # proposition_id = dbProposition.objects(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat']).first().id\n # return \"The proposition already exists at id {}\".format(proposition_id), 208\n # else:\n # proposition = dbProposition(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat'])\n # proposition.save()\n # return json.loads(proposition.to_json()), 201\n\n\napi.add_resource(Propositions, '/propositions')\n\n\n# Proposition\n# shows a single proposition item and lets you PUT or DELETE a proposition item in the database\nclass Proposition(Resource):\n\n @staticmethod\n def get(proposition_id):\n \"\"\"\n Get a single proposition registered in the database.\n :param: proposition_id: Id of the proposition to get\n :return: A JSON file of the proposition.\n \"\"\"\n if dbProposition.objects(id=proposition_id).first() is not None:\n return json.loads(dbProposition.objects(id=proposition_id).first().to_json()), 200\n else:\n return \"Proposition not found at this id {}\".format(proposition_id), 404\n\n @staticmethod\n def delete(proposition_id):\n \"\"\"\n Delete from the database a single proposition registered in the database.\n :param: proposition_id: Id of the proposition to delete\n :return: 204.\n \"\"\"\n dbProposition.objects(id=proposition_id).delete()\n return \"\", 204\n\n # @staticmethod\n # def put(proposition_id):\n # 
\"\"\"\n # Update a proposition that is registered in the database.\n # :param: amount: Price of the proposition - Default is the proposition's one\n # remainingSeat: Number of remaining seats for the proposition - Default is the proposition's one\n # :return: A JSON file of the proposition newly updated.\n # \"\"\"\n\n # # proposition parser\n # proposition_parser = reqparse.RequestParser()\n # proposition_parser.add_argument(name='amount', type=int, help=\"The price of the proposition\")\n # proposition_parser.add_argument(name='remainingSeat', type=float,\n # help=\"The number of remaining seats for the proposition\")\n # proposition_args = proposition_parser.parse_args()\n\n # proposition = dbProposition.objects(id=proposition_id).first()\n\n # # get default values\n # if proposition_args['amount'] is None:\n # proposition_args['amount'] = proposition.amount\n # if proposition_args['remainingSeat'] is None:\n # proposition_args['remainingSeat'] = proposition.remainingSeat\n\n # dbProposition.objects(id=proposition_id).update_one(set__amount=proposition_args['amount'],\n # set__remainingSeat=proposition_args['remainingSeat'])\n # return json.loads(dbProposition.objects(id=proposition_id).first().to_json()), 200\n\n\napi.add_resource(Proposition, '/propositions/')\n\n\n# TrainRecords\n# shows a list of train records in the database\nclass TrainRecords(Resource):\n @staticmethod\n def get():\n \"\"\"\n Get the list of all the train records registered in the database by page.\n :param: page: The page you want to get at, 0 to get all pages. Each page contains 3 train records. - Default is 1\n :return: A list of JSON each containing a train record.\n \"\"\"\n start = time.time()\n # trainrecords parser\n trainrecords_parser = reqparse.RequestParser()\n trainrecords_parser.add_argument(\n name='page', type=int, default=1, help=\"The page you want to get at, 0 to get all pages. Each page contains 3 train records. 
- Default is 1\")\n trainrecords_args = trainrecords_parser.parse_args()\n\n page_id = trainrecords_args['page']\n offset = 3\n \n if not(page_id):\n db_trainrecords = dbTrainRecord.objects.order_by('departureTime')\n else:\n db_trainrecords = dbTrainRecord.objects.order_by('departureTime')[(page_id-1)*offset:page_id*offset]\n trainrecords = json.loads(db_trainrecords.to_json())\n\n for k in range(len(trainrecords)):\n #step1 = time.time()\n #trainrecords[k]['recordedTime'] = str((db_trainrecords[k]['recordedTime']).isoformat())\n #step2 = time.time()\n #print(\"step 1 took {} s\".format(step2-step1))\n trainrecords[k]['arrivalTime'] = str(\n (db_trainrecords[k].arrivalTime.isoformat()))\n #step3 = time.time()\n #print(\"step 2 took {} s\".format(step3-step2))\n trainrecords[k]['departureTime'] = str(\n (db_trainrecords[k].departureTime.isoformat()))\n #step4 = time.time()\n #print(\"step 3 took {} s\".format(step4-step3))\n trainrecords[k]['propositions'] = []\n for db_propositions in db_trainrecords[k].propositions:\n content = {}\n for db_proposition in db_propositions.content:\n content[db_proposition.type] = {\n 'amount': db_proposition.amount, 'seats': db_proposition.remainingSeat}\n trainrecords[k]['propositions'].append(\n {'recordedTime': db_propositions.recordedTime.isoformat(), 'content': content})\n #step5 = time.time()\n #print(\"step 4 took {} s\".format(step5-step4))\n trainrecords[k]['destination'] = db_trainrecords[k].destination.name\n trainrecords[k]['origin'] = db_trainrecords[k].origin.name\n #print(\"One trainrecord took {} s\".format(step5-step1))\n\n end = time.time()\n print(\"GET /trainrecords took \"+str(end-start)+\" s\")\n return trainrecords, 200\n\n\napi.add_resource(TrainRecords, '/trainrecords')\n\n# class TrainRecordsPages(Resource):\n# @staticmethod\n# def get(page_id):\n# \"\"\"\n# Get the list of all the train records registered in the database by page.\n# :param: page_id: The page you want to get at, 0 to get all pages. Each page contains 3 train records. 
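The manual pagination above returns `offset` (3) records per 1-based page, with page 0 meaning all records; the slice arithmetic as a sketch:

def page_slice(page_id, offset=3):
    # Page 1 -> records [0:3], page 2 -> [3:6], and so on.
    start = (page_id - 1) * offset
    return start, start + offset

assert page_slice(1) == (0, 3)
assert page_slice(2) == (3, 6)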
- Default is 1\n# :return: A list of JSON each containing a train record.\n# \"\"\"\n# start = time.time()\n# page_id=int(page_id)\n# offset = 3\n# if not(page_id):\n# db_trainrecords = dbTrainRecord.objects.order_by('departureTime')\n# else:\n# db_trainrecords = dbTrainRecord.objects.order_by('departureTime')[(page_id-1)*offset:page_id*offset]\n# trainrecords = json.loads(db_trainrecords.to_json())\n# for k in range(len(trainrecords)):\n# #step1 = time.time()\n# #trainrecords[k]['recordedTime'] = str((db_trainrecords[k]['recordedTime']).isoformat())\n# #step2 = time.time()\n# #print(\"step 1 took {} s\".format(step2-step1))\n# trainrecords[k]['arrivalTime'] = str(\n# (db_trainrecords[k].arrivalTime.isoformat()))\n# #step3 = time.time()\n# #print(\"step 2 took {} s\".format(step3-step2))\n# trainrecords[k]['departureTime'] = str(\n# (db_trainrecords[k].departureTime.isoformat()))\n# #step4 = time.time()\n# #print(\"step 3 took {} s\".format(step4-step3))\n# trainrecords[k]['propositions'] = []\n# for db_propositions in db_trainrecords[k].propositions:\n# content = {}\n# for db_proposition in db_propositions.content:\n# content[db_proposition.type] = {\n# 'amount': db_proposition.amount, 'seats': db_proposition.remainingSeat}\n# trainrecords[k]['propositions'].append(\n# {'recordedTime': db_propositions.recordedTime.isoformat(), 'content': content})\n# #step5 = time.time()\n# #print(\"step 4 took {} s\".format(step5-step4))\n# trainrecords[k]['destination'] = db_trainrecords[k].destination.name\n# trainrecords[k]['origin'] = db_trainrecords[k].origin.name\n# #print(\"One trainrecord took {} s\".format(step5-step1))\n\n# end = time.time()\n# print(\"GET /trainrecords/pages/\"+str(page_id)+\" took \"+str(end-start)+\" s\")\n# return trainrecords, 200\n\n\n# api.add_resource(TrainRecordsPages, '/trainrecords/pages/')\n\n\n# TrainRecord\n# shows a single train record item and lets you DELETE a train record item in the database\nclass TrainRecord(Resource):\n\n @staticmethod\n def get(trainrecord_id):\n \"\"\"\n Get a single train record registered in the database.\n :param: trainrecord_id: Id of the train record to get\n :return: A JSON file of the train record.\n \"\"\"\n if dbTrainRecord.objects(id=trainrecord_id).first() is not None:\n db_trainrecord = dbTrainRecord.objects(id=trainrecord_id).first()\n trainrecord = json.loads(db_trainrecord.to_json())\n #trainrecord['recordedTime'] = str((db_trainrecord['recordedTime']).isoformat())\n trainrecord['arrivalTime'] = str(\n (db_trainrecord['arrivalTime']).isoformat())\n trainrecord['departureTime'] = str(\n (db_trainrecord['departureTime']).isoformat())\n trainrecord['propositions'] = []\n for db_propositions in db_trainrecord.propositions:\n content = {}\n for db_proposition in db_propositions.content:\n content[db_proposition.type] = {\n 'amount': db_proposition.amount, 'seats': db_proposition.remainingSeat}\n trainrecord['propositions'].append(\n {'recordedTime': db_propositions.recordedTime.isoformat(), 'content': content})\n trainrecord['origin'] = db_trainrecord['origin'].name\n return trainrecord, 200\n else:\n return \"Train record not found at this id {}\".format(trainrecord_id), 404\n\n @staticmethod\n def delete(trainrecord_id):\n \"\"\"\n Delete from the database a single train record registered in the database.\n :param: trainrecord_id: Id of the train record to delete\n :return: 204.\n \"\"\"\n dbTrainRecord.objects(id=trainrecord_id).delete()\n return \"\", 204\n\n\napi.add_resource(TrainRecord, '/trainrecords/')\n\n\nif __name__ 
== '__main__':\n app.run(port='8080', debug=True)\n","repo_name":"Kornflex28/train-tracker","sub_path":"webserver/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":25924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17201185563","text":"import numpy as np\n\ndef transform_to_text(pred_start, pred_end, text, offset, sentiment):\n\n def decode(pred_start, pred_end, text, offset):\n decoded_text = \"\"\n for i in range(pred_start, pred_end+1):\n decoded_text += text[offset[i][0]:offset[i][1]]\n if (i+1) < len(offset) and offset[i][1] < offset[i+1][0]:\n decoded_text += \" \"\n return decoded_text\n\n decoded_predictions = []\n for i in range(len(text)):\n # if sentiment[i] == \"neutral\" or len(text[i].split()) < 2:\n # decoded_text = text[i]\n # else:\n idx_start = np.argmax(pred_start[i])\n # idx_end = np.argmax(pred_end[i])\n candidates_end = np.argsort(pred_end[i])[::-1]\n\n j = 0\n while 1:\n idx_end = candidates_end[j]\n if idx_start <= idx_end:\n break\n j += 1\n\n decoded_text = str(decode(idx_start, idx_end, text[i], offset[i]))\n # if len(decoded_text) == 0:\n # decoded_text = text[i]\n decoded_predictions.append(decoded_text)\n\n return decoded_predictions\n\n\ndef compute_jaccard(selected_text, selected_text_pred):\n\n def jaccard(str1, str2):\n a = set(str1.lower().split())\n b = set(str2.lower().split())\n c = a.intersection(b)\n return float(len(c)) / (len(a) + len(b) - len(c))\n\n jaccard_mean = 0.\n for i in range(len(selected_text)):\n jaccard_mean += jaccard(selected_text[i], selected_text_pred[i])\n return jaccard_mean / len(selected_text)\n","repo_name":"akensert/kaggle-tweet-sentiment-extraction","sub_path":"src/tweet-sentiment-extraction/common/prediction_utils.py","file_name":"prediction_utils.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"3368654006","text":"\n# -*- coding: utf-8 -*-\nfrom locale import *\nimport sys,os\n\nproject_dir = '../tr/tr/'\n\nsys.path.append(project_dir)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'settings'\nimport django\ndjango.setup()\n\nimport soundcloud\nfrom music.models import *\nfrom datetime import datetime, date, time\n\n\nclient = soundcloud.Client(client_id='dce5652caa1b66331903493735ddd64d')\npage_size = 200\ngenres_list = SoundGenres.objects.values('name')\ngenres_list_names = [name['name'] for name in genres_list]\n\nп_rus_list_1 = [\n\"П. Луспекаев\",\n\"Павел Артемьев И Ирина Тонева\",\n\"Павел Балтийский\",\n\"Павел Беккерман\",\n\"Павел Вишерский\",\n\"Павел Воробьёв\",\n\"Павел Данилов\",\n\"Павел Кашин\",\n\"Павел Козлов\",\n\"Павел Красношлык\",\n\"Павел Мах\",\n\"Павел Михайлов\",\n\"Павел Мурашов\",\n\"Павел Нарочанский\",\n\"Павел Павлецов\",\n\"Павел Пиковский и Группа Хьюго\",\n\"Павел Родни\",\n\"Павел Соколов\",\n\"Павел Фёдоров\",\n\"Павел Федоров (Paulo)\",\n\"Павел Филатов\",\n\"Павел Филатов & Настя\",\n\"Павел Филатов и группа Вне Зоны\",\n\"Павел Чумаков\",\n\"Павел Шевцов\",\n\"Павел Шубин и Андрей Якиманский\",\n\"Павла и Денис Ковальский\",\n\"Павлентий Чернов\",\n\"Павло Табаков\",\n\"Пальчики Оближешь\",\n\"Пан and Dino MC 47\",\n\"Панакота\",\n\"Панда feat. tompSON\",\n\"Паола\",\n\"Папины Дети\",\n\"Пара нормальных\",\n\"Пара Совпала\",\n\"Параллельные\",\n\"ПараТайн\",\n\"Парень Из Союза\",\n\"Пари\",\n\"Парк Удовольствий\",\n\"Паскаль\",\n\"ПатриотЪ\",\n\"Паук feat. 
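The word-level Jaccard metric inside compute_jaccard above, worked on a tiny example:

def jaccard(str1, str2):
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))

# {so, happy} vs {happy}: the intersection has 1 word, the union has 2.
assert jaccard('so happy', 'happy') == 0.5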
Togga & Kvadrat\",\n\"Паулина Андреева feat. Баста\",\n\"Пацанка\",\n\"Пацаны\",\n\"Пачуля\",\n\"Паша Proorok\",\n\"Паша Вайти\",\n\"Паша Захарчук\",\n\"Паша Климат feat. Сюзанна Абдулла\",\n\"Паша Ли\",\n\"Паша Люмин и Даша Шувалова\",\n\"Паша Мос\",\n\"Паша Панамо\",\n\"Паша Руденко\",\n\"Паша Сли\",\n\"Паша Цветомузыка\",\n\"Паша Юдин\",\n\"Пающие Трусы\",\n\"Певица Афродита\",\n\"Пелих Ангелина\",\n\"Пепел Роза\",\n\"Первая Zаповедь & Савва Тихий\",\n\"Первая Zаповедь, Ahimas & Чак (M.Family)\",\n\"Первый Контакт\",\n\"Первый поворот\",\n\"Песняры\",\n\"Петкун, Голубев, Макарский\",\n\"Петлюра\",\n\"Петр Гара\",\n\"Пётр Дранга\",\n\"Петр Елфимов\",]\n\nп_rus_list_2 = [\n\"Петр Ильич Чайковский\",\n\"Пётр Казаков\",\n\"Петр Лещенко\",\n\"Петр Налич\",\n\"Петр Сергеев\",\n\"Петя Черный\",\n\"ПЗЖЕ feat. Рыбос\",\n\"Пиджаков\",\n\"Пикник\",\n\"Пилот\",\n\"Пиноккио\",\n\"Пионерский Хор Им. В.У. Попова\",\n\"Питер Пэн\",\n\"Пицца\",\n\"Пламя\",\n\"Планета 90\",\n\"Планка\",\n\"Пласти��а\",\n\"Платина\",\n\"Плохиш\",\n\"По Ту Сторону\",\n\"По Фрейду\",\n\"Под Одним Небом\",\n\"ПодZемка\",\n\"Подиум\",\n\"Поднимаем Руки Вверх\",\n\"Подпольная Траектория feat. Ahimas\",\n\"Подруги\",\n\"Подстрелов\",\n\"Подъём!\",\n\"Позитив and Напильник\",\n\"Покахонтас\",\n\"Полежаев\",\n\"Полиграф ШарикOFF\",\n\"Полина Богатикова\",\n\"Полина Богусевич\",\n\"Полина Буторина feat. DJ Groove\",\n\"Полина Гагарина\",\n\"Полина Гриффис\",\n\"Полина Зизак\",\n\"Полина Кузовкова (Pollykuu)\",\n\"Полина Ростова\",\n\"Полина Смолова\",\n\"Полина Сокольская\",\n\"Полина Соя\",\n\"Полнолуние\",\n\"Положительный Заряд\",\n\"Полтергейст\",\n\"Полумягкие\",\n\"Полюса\",\n\"Попанбэнд\",\n\"ПопКорн\",\n\"После 11\",\n\"После Вчерашнего\",\n\"Потапов Владимир\",\n\"Потемковский\",\n\"Потехин Бэнд\",\n\"Потехин, Трэк и Блюз\",\n\"Поющие вместе\",\n\"Поющие трусы\",\n\"Президент И Амазонка\",\n\"Премьер-Министр\",\n\"Приключения Мишек Гамми\",\n\"Приключения Спин И Марти\",\n\"Приключения Тигрули\",\n\"Приключения Флика\",\n\"Принцесса Авенью\",\n\"Принцесса И Лягушка\",\n\"Принцип (ZM)\",\n\"Провинция 42 feat. 
Bizaro\",\n\"Прогульщики\",\n\"Продавцы Новостей\",\n\"Проект Димac\",\n\"Проект Жить\",\n\"Проект Увечье\",]\n\nп_rus_list_3 = [\n\"Проект-22\",\n\"Прокофьев\",\n\"ПромЗона\",\n\"Пропаганда\",\n\"Пропорции\",\n\"Проспект 64\",\n\"Против Правил\",\n\"Профессор Лебединский\",\n\"Профилактика\",\n\"Профсоюзный Ансамбль Песни И Пляски\",\n\"Прохор Шаляпин\",\n\"Психо\",\n\"Птаха\",\n\"Пугачева Алла\",\n\"Пульсы\",\n\"Пуля\",\n\"Путевка В Жизнь\",\n\"Пушкашу & Випи\",\n\"Пыльца\",\n\"Пьер Нарцисс\",\n\"Пьера\",\n\"Пэссо\",\n\"Пятилетка\",\n\"Пятница 13-е\",\n]\n\nlitera = SoundSymbol.objects.get(name=\"П\")\n\ncount = 0\n\nfor tag in п_rus_list_1:\n tracks = client.get('/tracks', q=tag, limit=page_size, linked_partitioning=1)\n if tracks:\n for track in tracks.collection:\n created_at = track.created_at\n created_at = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')\n if track.description:\n description = track.description[:500]\n else:\n description=None\n try:\n Music.objects.get(id=track.id)\n except:\n if track.genre and track.release_year and track.duration > 90000 and track.genre in genres_list_names:\n try:\n self_tag = SoundTags.objects.get(name=tag, symbol=litera)\n except:\n self_tag = SoundTags.objects.create(name=tag, symbol=litera)\n genre =SoundGenres.objects.get(name=track.genre.replace(\"'\", '') )\n new_track = Music.objects.create(id=track.id, tag=self_tag, artwork_url=track.artwork_url, created_at=created_at, duration=track.duration, genre=genre, description=description, title=track.title, uri=track.uri, release_year=track.release_year)\n count = count + 1\n while tracks.next_href != None and count < 2000:\n tracks = client.get(tracks.next_href, limit=page_size, linked_partitioning=1)\n for track in tracks.collection:\n created_at = track.created_at\n created_at = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')\n if track.description:\n description = track.description[:500]\n else:\n description=None\n try:\n Music.objects.get(id=track.id)\n except:\n if track.genre and track.release_year and track.duration > 90000 and track.genre in genres_list_names:\n try:\n self_tag = SoundTags.objects.get(name=tag, symbol=litera)\n except:\n self_tag = SoundTags.objects.create(name=tag, symbol=litera)\n genre =SoundGenres.objects.get(name=track.genre.replace(\"'\", '') )\n new_track = Music.objects.create(id=track.id, tag=self_tag, artwork_url=track.artwork_url, created_at=created_at, duration=track.duration, genre=genre, description=description, title=track.title, uri=track.uri, release_year=track.release_year)\n count = count + 1\n","repo_name":"interesnij/django-social-network","sub_path":"common/parsing_soundcloud/rus/parsing_п_rus.py","file_name":"parsing_п_rus.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"19433842524","text":"from rest_framework.views import APIView, Request, Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import Movie\nfrom .serializers import MovieSerializer, MovieOrderSerializer\nfrom rest_framework.pagination import PageNumberPagination\n\n\nclass MoviesView(APIView, PageNumberPagination):\n def get(self, request: Request) -> Response:\n movies = Movie.objects.all().order_by(\"id\")\n\n result_page = self.paginate_queryset(movies, request, view=self)\n\n serializer = MovieSerializer(result_page, many=True)\n\n return self.get_paginated_response(serializer.data)\n\n def post(self, request: Request) -> 
\n","repo_name":"interesnij/django-social-network","sub_path":"common/parsing_soundcloud/rus/parsing_п_rus.py","file_name":"parsing_п_rus.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"19433842524","text":"from rest_framework.views import APIView, Request, Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import Movie\nfrom .serializers import MovieSerializer, MovieOrderSerializer\nfrom rest_framework.pagination import PageNumberPagination\n\n\nclass MoviesView(APIView, PageNumberPagination):\n    def get(self, request: Request) -> Response:\n        movies = Movie.objects.all().order_by(\"id\")\n\n        result_page = self.paginate_queryset(movies, request, view=self)\n\n        serializer = MovieSerializer(result_page, many=True)\n\n        return self.get_paginated_response(serializer.data)\n\n    def post(self, request: Request) -> Response:\n        data = request.data\n\n        serializer = MovieSerializer(data=data)\n\n        if not serializer.is_valid():\n            return Response(serializer.errors, 400)\n\n        serializer.save(user=request.user)\n\n        return Response(serializer.data, 201)\n\n\nclass MoviesIdView(APIView):\n    def delete(self, request: Request, movie_id: int) -> Response:\n        try:\n            movie = Movie.objects.get(id=movie_id)\n            movie.delete()\n            return Response(status=204)\n\n        except Movie.DoesNotExist:\n            return Response({\"detail\": \"Not found\"}, 404)\n\n\nclass MovieOrderView(APIView):\n    permission_classes = [IsAuthenticated]\n\n    def post(self, request: Request, movie_id: int) -> Response:\n        try:\n            data = request.data\n            movie = Movie.objects.get(pk=movie_id)\n            serializer = MovieOrderSerializer(data=data)\n\n            if not serializer.is_valid():\n                return Response(serializer.errors, 400)\n\n            serializer.save(buyed_by=request.user, movie=movie)\n\n            return Response(serializer.data, 201)\n\n        except Movie.DoesNotExist:\n            # The missing 404 status here meant a silent 200 on a lookup failure.\n            return Response({\"detail\": \"Not found\"}, 404)\n","repo_name":"micaias-silva/kenzie-buster-micaias-silva","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"6002225984","text":"from collections import defaultdict\n\n\ndef first_recurrent(string):\n    \"\"\"\n    This problem was asked by Google.\n\n    Given a string, return the first recurring character in it, or null if there is no recurring character.\n\n    For example, given the string \"acbbac\", return \"b\". Given the string \"abcdef\", return null.\n    :return:\n    \"\"\"\n    count = defaultdict(int)\n    for char in string:\n        if count[char]:\n            return char\n        else:\n            count[char] += 1\n    return None\n\n\nif __name__ == \"__main__\":\n    print(first_recurrent(\"acbbac\"))
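\n    # Hypothetical extra check (not in the original): a string with no repeats\n    # returns None. The scan is O(n) time and O(k) space for k distinct characters.\n    print(first_recurrent(\"abcdef\"))  # -> None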
","repo_name":"Michael-Mbajwa/coding_challenges","sub_path":"first_recurrent.py","file_name":"first_recurrent.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"27862292979","text":"class Car:\n\n    def __init__(self):\n        self.plate_num = ''\n        self.digit = 0\n        self.date = 0\n        self.day_week = 0\n        self.name = ''\n        self.hour = 0\n        self.m = 0\n        self.restriction_time = ''\n        self.car_move = ''\n\n    def get_plate_validation(self):\n        import re\n        valid_plate = False\n        while not valid_plate:\n            try:\n                plate_let, self.plate_num = input(\"Plate number in the format AAA-1234: \").split('-')\n\n                if plate_let == '' or self.plate_num == '':\n                    print(\"Incorrect format! empty\")\n                elif len(plate_let) != 3 or not re.match(\"^[A-Z]*$\", plate_let):\n                    print(\"Incorrect format! three uppercase letters\")\n                elif 4 < len(self.plate_num) or len(self.plate_num) < 3 or not re.match(\"^[0-9]*$\", self.plate_num):\n                    print(\"Incorrect format! three or four numbers\")\n                else:\n                    valid_plate = True\n            except ValueError:\n                print(\"Incorrect format! AAA-1234\")\n\n        self.digit = int(self.plate_num[-1])\n\n        return self.digit\n\n    def get_date_validation(self):\n        import datetime\n        valid_date = False\n        while not valid_date:\n            date_input = input(\"Date in the format dd/mm/yy: \")\n            try:\n                self.date = datetime.datetime.strptime(date_input, \"%d/%m/%y\")\n                valid_date = True\n            except ValueError:\n                print(\"Incorrect format! dd/mm/yy\")\n\n        self.day_week = self.date.weekday()\n        day_names = {'Monday': 0, 'Tuesday': 1, 'Wednesday': 2, 'Thursday': 3, 'Friday': 4}\n        list_items = day_names.items()\n        for item in list_items:\n            if item[1] == self.day_week:\n                self.name = item[0]\n\n        return self.day_week, self.name\n\n    def get_time_validation(self):\n        import datetime\n        import time\n        valid_time = False\n        while not valid_time:\n            time_input = input(\"Time in the format hour,min: \")\n            try:\n                time1 = time.strptime(time_input, '%H,%M')\n                valid_time = True\n            except ValueError:\n                print(\"Incorrect format! hour,min\")\n\n        time1 = time.strftime('%H:%M', time1)\n        self.hour, self.m = time1.split(':')\n\n        time_in = datetime.time(int(self.hour), int(self.m))\n        if (datetime.time(7, 0) <= time_in <= datetime.time(9, 30)) or (\n                datetime.time(16, 0) <= time_in <= datetime.time(19, 30)):\n            self.restriction_time = 1\n        else:\n            self.restriction_time = 0\n\n        return self.restriction_time\n\n    def transit(self, digit_plate, day_week, restriction_time):\n\n        if (digit_plate == 1 or digit_plate == 2) and day_week == 0:  # Monday\n            move = 0\n        elif (digit_plate == 3 or digit_plate == 4) and day_week == 1:  # Tuesday\n            move = 0\n        elif (digit_plate == 5 or digit_plate == 6) and day_week == 2:  # Wednesday\n            move = 0\n        elif (digit_plate == 7 or digit_plate == 8) and day_week == 3:  # Thursday\n            move = 0\n        elif (digit_plate == 9 or digit_plate == 0) and day_week == 4:  # Friday\n            move = 0\n        else:\n            move = 1\n\n        if move == 0 and restriction_time == 1:\n            self.car_move = \"cannot transit\"\n        else:\n            self.car_move = \"can transit\"\n\n        return self.car_move\n\n    def run_inspection(self):\n\n        plate_dig = self.get_plate_validation()\n        day_num, day_name = self.get_date_validation()\n        time_restriction = self.get_time_validation()\n\n        move_car = self.transit(plate_dig, day_num, time_restriction)\n\n        print(\"Last Digit: \" + str(plate_dig) + \" in \" + day_name + \" at \" + str(self.hour) + \":\" + str(\n            self.m) + \" ==> \" + str(move_car))\n\n\nif __name__ == '__main__':\n\n    import datetime\n    import time\n    import re\n    my_car = Car()\n\n    print(\"PICO Y PLACA PREDICTOR\")\n\n    while True:\n\n        my_car.run_inspection()
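\n\n# Illustrative session (hypothetical input/output, not part of the file):\n#   Plate ABC-1232, date 06/09/21 (a Monday), time 8,00\n#   -> last digit 2 is restricted on Mondays, and 8:00 falls inside the\n#      7:00-9:30 window, so the car \"cannot transit\".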
\n","repo_name":"siCaet/PicoyPlacaProject","sub_path":"car_restrictions.py","file_name":"car_restrictions.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"25967147636","text":"# Letters get ROT13, digits get ROT5\n\n# ROT5 / ROT13 / ROT18 decoding\n\nimport string\n\n# Pull in the string module's predefined character sets\nascii_lowercase = string.ascii_lowercase  # lowercase letters\nascii_uppercase = string.ascii_uppercase  # uppercase letters\ndigits = string.digits\n\n# rot-18\n# ROT18: an oddball; it does not exist on its own, it simply combines ROT5 and ROT13, and the combination is named ROT18 for convenience.\n\n# rot-5\n# ROT5: encodes digits only, replacing each digit with the one 5 positions ahead, wrapping around (0 becomes 5, 1 becomes 6, and so on).\ndigits_dict = {}\nfor i in range(len(digits)):\n    digits_dict[digits[i]] = digits[i - 5]\n\n# rot-13\n# ROT13: encodes letters only, replacing each letter with the one 13 positions ahead, wrapping around (A becomes N, B becomes O, and so on).\nlookup_dict = {}\n# fill in the uppercase mappings\nfor i in range(len(ascii_uppercase)):\n    lookup_dict[ascii_uppercase[i]] = ascii_uppercase[i - 13]\n# fill in the lowercase mappings\nfor i in range(len(ascii_lowercase)):\n    lookup_dict[ascii_lowercase[i]] = ascii_lowercase[i - 13]\n\n# Decide whether each input character is a digit or a letter, then convert.\n# Fun observation: Chinese characters also pass the isalpha() check.\n\n# a = string.ascii_letters + string.digits\ncipher = input(\"what's your cipher str:\")\n# cipher = a\nclear = ''\nfor i in cipher:\n    if i.isdigit():\n        a_digit = digits_dict[i]\n    elif i.isalpha():\n        # .get() so letters without a mapping (e.g. CJK, which pass isalpha()) pass through unchanged instead of raising KeyError\n        a_digit = lookup_dict.get(i, i)\n    else:\n        a_digit = i\n    clear += a_digit\n\nprint(clear)\n# print(a)\n\n","repo_name":"wgf4242/text","sub_path":"docs/ctf/scripts/Crypto_rot18.py","file_name":"Crypto_rot18.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"zh","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"} {"seq_id":"72891666170","text":"import time\nfrom tkinter import *\nfrom models.Setting import Setting\nimport json\n\n\n# button event handlers\n\n\n\n# build the main window\ndef start():\n    def vao_game():\n        s = Setting(hang.get(), cot.get(), dkt.get())\n        j = json.dumps(s.__dict__)\n        # f = open(\"setting.json\", \"w\")\n        # f.write(j)\n        # f.close()\n        root.setvar(\"setting\", j)\n        root.destroy()\n        return s\n    root = Tk()\n    root.geometry(\"250x250\")\n    # default values\n    hang = StringVar(root)\n    cot = StringVar(root)\n    dkt = StringVar(root)  # needed by vao_game() even while its spinbox stays commented out below\n    hang.set(\"3\")\n    cot.set(\"3\")\n    dkt.set(\"3\")\n    # main title\n    l = Label(root, text=\"Cài đặt chung\")\n    # labels for the selectors\n    lb_chon_hang = Label(root, text=\"Chọn số hàng\")\n    lb_chon_cot = Label(root, text=\"Chọn số cột\")\n    lb_chon_win = Label(root, text=\"Chọn luật thắng\")\n    # spinboxes for rows, columns and the win rule\n    chon_so_hang = Spinbox(root, from_=3, to=10, textvariable=hang)\n    chon_so_cot = Spinbox(root, from_=3, to=10, textvariable=cot)\n    # chon_so_win = Spinbox(root, from_=3, to=10, textvariable=dkt)\n\n    # enter the game\n    b1 = Button(root, text=\"Vào game\", command=vao_game)\n\n    # quit the game\n    b2 = Button(root, text=\"Thoát\",\n                command=root.destroy)\n\n    # lay out the widgets\n    l.pack()\n    lb_chon_hang.pack()\n    chon_so_hang.pack()\n    lb_chon_cot.pack()\n    chon_so_cot.pack()\n    lb_chon_win.pack()\n    # chon_so_win.pack()\n    b1.pack()\n    b2.pack()\n\n\n    root.mainloop()\n    return root\n","repo_name":"thien2001git/TTNT6","sub_path":"windows/Window1.py","file_name":"Window1.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"536670536","text":"#Projectile Coordinates\n\nimport math\n\ndef plot_coordinates():\n    for i in range(1, 100):\n        t = i * 0.1\n        x = 20.0 * t * math.cos(math.radians(70))\n        y = (20.0 * t * math.sin(math.radians(70))) - ((9.81 * (t**2))/2)\n        print(round(x), round(y), sep=',')\n        if y <= 0:\n            break\n\nif __name__ == '__main__':\n    plot_coordinates()","repo_name":"vinmen/learnpy","sub_path":"projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"37035480876","text":"# Florence has N integers. Her objective is to have N equal integers by transforming some of them.\n\n# She may transform each integer at most once. Transforming an integer X into another integer Y costs her (x-y)^2 pounds. Even if ai = aj, she has to pay the cost separately for transforming each of them (see Sample 2).\n\n# Find the minimum total cost to achieve her objective.\n\n# Input Format\n\n# Given standard input string as follows:\n\n# N\n# a1,a2,a3,...,an\n\n#30/30\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT
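\n\n# Worked example (illustrative, not part of the submission): N=3, a=[1,2,4]\n# -> mean 7/3 ~ 2.33 -> nearest integer y=2 -> cost (1-2)^2 + (2-2)^2 + (4-2)^2 = 5.\n# The sum of squared deviations over integers is minimised at the integer\n# closest to the mean, which is why rounding the average below is enough.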
\nN = int(input())\nlis = [int(i) for i in input().split()]  # int() rather than eval(): safer, and the inputs are plain integers\n\navg = round(sum(lis)/len(lis))\nrt = 0\nfor i in range(len(lis)):\n    rt += (avg-lis[i])**2\n\nprint(rt)\n\n","repo_name":"Arsenic-33/GUTSCodeHackathon","sub_path":"BTogether.py","file_name":"BTogether.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"19446383307","text":"from sys import platform\nfrom functools import wraps, partial\nfrom itertools import count\nfrom weakref import WeakValueDictionary\nfrom errno import errorcode\n\nfrom six import text_type as _text_type\nfrom six import integer_types as integer_types\n\nfrom OpenSSL._util import (\n    ffi as _ffi,\n    lib as _lib,\n    exception_from_error_queue as _exception_from_error_queue,\n    native as _native)\n\nfrom OpenSSL.crypto import (\n    FILETYPE_PEM, _PassphraseHelper, PKey, X509Name, X509, X509Store)\n\n_unspecified = object()\n\ntry:\n    _memoryview = memoryview\nexcept NameError:\n    class _memoryview(object):\n        pass\n\nOPENSSL_VERSION_NUMBER = _lib.OPENSSL_VERSION_NUMBER\nSSLEAY_VERSION = _lib.SSLEAY_VERSION\nSSLEAY_CFLAGS = _lib.SSLEAY_CFLAGS\nSSLEAY_PLATFORM = _lib.SSLEAY_PLATFORM\nSSLEAY_DIR = _lib.SSLEAY_DIR\nSSLEAY_BUILT_ON = _lib.SSLEAY_BUILT_ON\n\nSENT_SHUTDOWN = _lib.SSL_SENT_SHUTDOWN\nRECEIVED_SHUTDOWN = _lib.SSL_RECEIVED_SHUTDOWN\n\nSSLv2_METHOD = 1\nSSLv3_METHOD = 2\nSSLv23_METHOD = 3\nTLSv1_METHOD = 4\nTLSv1_1_METHOD = 5\nTLSv1_2_METHOD = 6\n\nOP_NO_SSLv2 = _lib.SSL_OP_NO_SSLv2\nOP_NO_SSLv3 = _lib.SSL_OP_NO_SSLv3\nOP_NO_TLSv1 = _lib.SSL_OP_NO_TLSv1\n\nOP_NO_TLSv1_1 = getattr(_lib, \"SSL_OP_NO_TLSv1_1\", 0)\nOP_NO_TLSv1_2 = getattr(_lib, \"SSL_OP_NO_TLSv1_2\", 0)\n\ntry:\n    MODE_RELEASE_BUFFERS = _lib.SSL_MODE_RELEASE_BUFFERS\nexcept AttributeError:\n    pass\n\nOP_SINGLE_DH_USE = _lib.SSL_OP_SINGLE_DH_USE\nOP_EPHEMERAL_RSA = _lib.SSL_OP_EPHEMERAL_RSA\nOP_MICROSOFT_SESS_ID_BUG = _lib.SSL_OP_MICROSOFT_SESS_ID_BUG\nOP_NETSCAPE_CHALLENGE_BUG = _lib.SSL_OP_NETSCAPE_CHALLENGE_BUG\nOP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG = _lib.SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG\nOP_SSLREF2_REUSE_CERT_TYPE_BUG = _lib.SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG\nOP_MICROSOFT_BIG_SSLV3_BUFFER = _lib.SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER\ntry:\n    OP_MSIE_SSLV2_RSA_PADDING = _lib.SSL_OP_MSIE_SSLV2_RSA_PADDING\nexcept AttributeError:\n    pass\nOP_SSLEAY_080_CLIENT_DH_BUG = _lib.SSL_OP_SSLEAY_080_CLIENT_DH_BUG\nOP_TLS_D5_BUG = _lib.SSL_OP_TLS_D5_BUG\nOP_TLS_BLOCK_PADDING_BUG = _lib.SSL_OP_TLS_BLOCK_PADDING_BUG\nOP_DONT_INSERT_EMPTY_FRAGMENTS = _lib.SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS\nOP_CIPHER_SERVER_PREFERENCE = _lib.SSL_OP_CIPHER_SERVER_PREFERENCE\nOP_TLS_ROLLBACK_BUG = _lib.SSL_OP_TLS_ROLLBACK_BUG\nOP_PKCS1_CHECK_1 = _lib.SSL_OP_PKCS1_CHECK_1\nOP_PKCS1_CHECK_2 = _lib.SSL_OP_PKCS1_CHECK_2\nOP_NETSCAPE_CA_DN_BUG = _lib.SSL_OP_NETSCAPE_CA_DN_BUG\nOP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG = _lib.SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG\ntry:\n    OP_NO_COMPRESSION = _lib.SSL_OP_NO_COMPRESSION\nexcept AttributeError:\n    pass\n\nOP_NO_QUERY_MTU = _lib.SSL_OP_NO_QUERY_MTU\nOP_COOKIE_EXCHANGE = _lib.SSL_OP_COOKIE_EXCHANGE\nOP_NO_TICKET = _lib.SSL_OP_NO_TICKET\n\nOP_ALL = _lib.SSL_OP_ALL\n\nVERIFY_PEER = _lib.SSL_VERIFY_PEER\nVERIFY_FAIL_IF_NO_PEER_CERT = _lib.SSL_VERIFY_FAIL_IF_NO_PEER_CERT\nVERIFY_CLIENT_ONCE = _lib.SSL_VERIFY_CLIENT_ONCE\nVERIFY_NONE = _lib.SSL_VERIFY_NONE\n\nSESS_CACHE_OFF = _lib.SSL_SESS_CACHE_OFF\nSESS_CACHE_CLIENT = _lib.SSL_SESS_CACHE_CLIENT\nSESS_CACHE_SERVER = 
_lib.SSL_SESS_CACHE_SERVER\nSESS_CACHE_BOTH = _lib.SSL_SESS_CACHE_BOTH\nSESS_CACHE_NO_AUTO_CLEAR = _lib.SSL_SESS_CACHE_NO_AUTO_CLEAR\nSESS_CACHE_NO_INTERNAL_LOOKUP = _lib.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP\nSESS_CACHE_NO_INTERNAL_STORE = _lib.SSL_SESS_CACHE_NO_INTERNAL_STORE\nSESS_CACHE_NO_INTERNAL = _lib.SSL_SESS_CACHE_NO_INTERNAL\n\nSSL_ST_CONNECT = _lib.SSL_ST_CONNECT\nSSL_ST_ACCEPT = _lib.SSL_ST_ACCEPT\nSSL_ST_MASK = _lib.SSL_ST_MASK\nSSL_ST_INIT = _lib.SSL_ST_INIT\nSSL_ST_BEFORE = _lib.SSL_ST_BEFORE\nSSL_ST_OK = _lib.SSL_ST_OK\nSSL_ST_RENEGOTIATE = _lib.SSL_ST_RENEGOTIATE\n\nSSL_CB_LOOP = _lib.SSL_CB_LOOP\nSSL_CB_EXIT = _lib.SSL_CB_EXIT\nSSL_CB_READ = _lib.SSL_CB_READ\nSSL_CB_WRITE = _lib.SSL_CB_WRITE\nSSL_CB_ALERT = _lib.SSL_CB_ALERT\nSSL_CB_READ_ALERT = _lib.SSL_CB_READ_ALERT\nSSL_CB_WRITE_ALERT = _lib.SSL_CB_WRITE_ALERT\nSSL_CB_ACCEPT_LOOP = _lib.SSL_CB_ACCEPT_LOOP\nSSL_CB_ACCEPT_EXIT = _lib.SSL_CB_ACCEPT_EXIT\nSSL_CB_CONNECT_LOOP = _lib.SSL_CB_CONNECT_LOOP\nSSL_CB_CONNECT_EXIT = _lib.SSL_CB_CONNECT_EXIT\nSSL_CB_HANDSHAKE_START = _lib.SSL_CB_HANDSHAKE_START\nSSL_CB_HANDSHAKE_DONE = _lib.SSL_CB_HANDSHAKE_DONE\n\n\nclass Error(Exception):\n \"\"\"\n An error occurred in an `OpenSSL.SSL` API.\n \"\"\"\n\n\n\n_raise_current_error = partial(_exception_from_error_queue, Error)\n\n\nclass WantReadError(Error):\n pass\n\n\n\nclass WantWriteError(Error):\n pass\n\n\n\nclass WantX509LookupError(Error):\n pass\n\n\n\nclass ZeroReturnError(Error):\n pass\n\n\n\nclass SysCallError(Error):\n pass\n\n\n\nclass _VerifyHelper(object):\n def __init__(self, connection, callback):\n self._problems = []\n\n @wraps(callback)\n def wrapper(ok, store_ctx):\n cert = X509.__new__(X509)\n cert._x509 = _lib.X509_STORE_CTX_get_current_cert(store_ctx)\n error_number = _lib.X509_STORE_CTX_get_error(store_ctx)\n error_depth = _lib.X509_STORE_CTX_get_error_depth(store_ctx)\n\n try:\n result = callback(connection, cert, error_number, error_depth, ok)\n except Exception as e:\n self._problems.append(e)\n return 0\n else:\n if result:\n _lib.X509_STORE_CTX_set_error(store_ctx, _lib.X509_V_OK)\n return 1\n else:\n return 0\n\n self.callback = _ffi.callback(\n \"int (*)(int, X509_STORE_CTX *)\", wrapper)\n\n\n def raise_if_problem(self):\n if self._problems:\n try:\n _raise_current_error()\n except Error:\n pass\n raise self._problems.pop(0)\n\n\n\ndef _asFileDescriptor(obj):\n fd = None\n if not isinstance(obj, integer_types):\n meth = getattr(obj, \"fileno\", None)\n if meth is not None:\n obj = meth()\n\n if isinstance(obj, integer_types):\n fd = obj\n\n if not isinstance(fd, integer_types):\n raise TypeError(\"argument must be an int, or have a fileno() method.\")\n elif fd < 0:\n raise ValueError(\n \"file descriptor cannot be a negative integer (%i)\" % (fd,))\n\n return fd\n\n\n\ndef SSLeay_version(type):\n \"\"\"\n Return a string describing the version of OpenSSL in use.\n\n :param type: One of the SSLEAY_ constants defined in this module.\n \"\"\"\n return _ffi.string(_lib.SSLeay_version(type))\n\n\n\nclass Session(object):\n pass\n\n\n\nclass Context(object):\n \"\"\"\n :py:obj:`OpenSSL.SSL.Context` instances define the parameters for setting up\n new SSL connections.\n \"\"\"\n _methods = {\n SSLv3_METHOD: \"SSLv3_method\",\n SSLv23_METHOD: \"SSLv23_method\",\n TLSv1_METHOD: \"TLSv1_method\",\n TLSv1_1_METHOD: \"TLSv1_1_method\",\n TLSv1_2_METHOD: \"TLSv1_2_method\",\n }\n _methods = dict(\n (identifier, getattr(_lib, name))\n for (identifier, name) in _methods.items()\n if getattr(_lib, name, None) is not 
None)\n\n\n def __init__(self, method):\n \"\"\"\n :param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, or\n TLSv1_METHOD.\n \"\"\"\n if not isinstance(method, integer_types):\n raise TypeError(\"method must be an integer\")\n\n try:\n method_func = self._methods[method]\n except KeyError:\n raise ValueError(\"No such protocol\")\n\n method_obj = method_func()\n if method_obj == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n\n context = _lib.SSL_CTX_new(method_obj)\n if context == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n context = _ffi.gc(context, _lib.SSL_CTX_free)\n\n self._context = context\n self._passphrase_helper = None\n self._passphrase_callback = None\n self._passphrase_userdata = None\n self._verify_helper = None\n self._verify_callback = None\n self._info_callback = None\n self._tlsext_servername_callback = None\n self._app_data = None\n\n # SSL_CTX_set_app_data(self->ctx, self);\n # SSL_CTX_set_mode(self->ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |\n # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |\n # SSL_MODE_AUTO_RETRY);\n self.set_mode(_lib.SSL_MODE_ENABLE_PARTIAL_WRITE)\n\n\n def load_verify_locations(self, cafile, capath=None):\n \"\"\"\n Let SSL know where we can find trusted certificates for the certificate\n chain\n\n :param cafile: In which file we can find the certificates\n :param capath: In which directory we can find the certificates\n :return: None\n \"\"\"\n if cafile is None:\n cafile = _ffi.NULL\n elif not isinstance(cafile, bytes):\n raise TypeError(\"cafile must be None or a byte string\")\n\n if capath is None:\n capath = _ffi.NULL\n elif not isinstance(capath, bytes):\n raise TypeError(\"capath must be None or a byte string\")\n\n load_result = _lib.SSL_CTX_load_verify_locations(self._context, cafile, capath)\n if not load_result:\n _raise_current_error()\n\n\n def _wrap_callback(self, callback):\n @wraps(callback)\n def wrapper(size, verify, userdata):\n return callback(size, verify, self._passphrase_userdata)\n return _PassphraseHelper(\n FILETYPE_PEM, wrapper, more_args=True, truncate=True)\n\n\n def set_passwd_cb(self, callback, userdata=None):\n \"\"\"\n Set the passphrase callback\n\n :param callback: The Python callback to use\n :param userdata: (optional) A Python object which will be given as\n argument to the callback\n :return: None\n \"\"\"\n if not callable(callback):\n raise TypeError(\"callback must be callable\")\n\n self._passphrase_helper = self._wrap_callback(callback)\n self._passphrase_callback = self._passphrase_helper.callback\n _lib.SSL_CTX_set_default_passwd_cb(\n self._context, self._passphrase_callback)\n self._passphrase_userdata = userdata\n\n\n def set_default_verify_paths(self):\n \"\"\"\n Use the platform-specific CA certificate locations\n\n :return: None\n \"\"\"\n set_result = _lib.SSL_CTX_set_default_verify_paths(self._context)\n if not set_result:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def use_certificate_chain_file(self, certfile):\n \"\"\"\n Load a certificate chain from a file\n\n :param certfile: The name of the certificate chain file\n :return: None\n \"\"\"\n if isinstance(certfile, _text_type):\n # Perhaps sys.getfilesystemencoding() could be better?\n certfile = certfile.encode(\"utf-8\")\n\n if not isinstance(certfile, bytes):\n raise TypeError(\"certfile must be bytes or unicode\")\n\n result = _lib.SSL_CTX_use_certificate_chain_file(self._context, certfile)\n if not result:\n _raise_current_error()\n\n\n def use_certificate_file(self, certfile, 
filetype=FILETYPE_PEM):\n \"\"\"\n Load a certificate from a file\n\n :param certfile: The name of the certificate file\n :param filetype: (optional) The encoding of the file, default is PEM\n :return: None\n \"\"\"\n if isinstance(certfile, _text_type):\n # Perhaps sys.getfilesystemencoding() could be better?\n certfile = certfile.encode(\"utf-8\")\n if not isinstance(certfile, bytes):\n raise TypeError(\"certfile must be bytes or unicode\")\n if not isinstance(filetype, integer_types):\n raise TypeError(\"filetype must be an integer\")\n\n use_result = _lib.SSL_CTX_use_certificate_file(self._context, certfile, filetype)\n if not use_result:\n _raise_current_error()\n\n\n def use_certificate(self, cert):\n \"\"\"\n Load a certificate from a X509 object\n\n :param cert: The X509 object\n :return: None\n \"\"\"\n if not isinstance(cert, X509):\n raise TypeError(\"cert must be an X509 instance\")\n\n use_result = _lib.SSL_CTX_use_certificate(self._context, cert._x509)\n if not use_result:\n _raise_current_error()\n\n\n def add_extra_chain_cert(self, certobj):\n \"\"\"\n Add certificate to chain\n\n :param certobj: The X509 certificate object to add to the chain\n :return: None\n \"\"\"\n if not isinstance(certobj, X509):\n raise TypeError(\"certobj must be an X509 instance\")\n\n copy = _lib.X509_dup(certobj._x509)\n add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy)\n if not add_result:\n # TODO: This is untested.\n _lib.X509_free(copy)\n _raise_current_error()\n\n\n def _raise_passphrase_exception(self):\n if self._passphrase_helper is None:\n _raise_current_error()\n exception = self._passphrase_helper.raise_if_problem(Error)\n if exception is not None:\n raise exception\n\n\n def use_privatekey_file(self, keyfile, filetype=_unspecified):\n \"\"\"\n Load a private key from a file\n\n :param keyfile: The name of the key file\n :param filetype: (optional) The encoding of the file, default is PEM\n :return: None\n \"\"\"\n if isinstance(keyfile, _text_type):\n # Perhaps sys.getfilesystemencoding() could be better?\n keyfile = keyfile.encode(\"utf-8\")\n\n if not isinstance(keyfile, bytes):\n raise TypeError(\"keyfile must be a byte string\")\n\n if filetype is _unspecified:\n filetype = FILETYPE_PEM\n elif not isinstance(filetype, integer_types):\n raise TypeError(\"filetype must be an integer\")\n\n use_result = _lib.SSL_CTX_use_PrivateKey_file(\n self._context, keyfile, filetype)\n if not use_result:\n self._raise_passphrase_exception()\n\n\n def use_privatekey(self, pkey):\n \"\"\"\n Load a private key from a PKey object\n\n :param pkey: The PKey object\n :return: None\n \"\"\"\n if not isinstance(pkey, PKey):\n raise TypeError(\"pkey must be a PKey instance\")\n\n use_result = _lib.SSL_CTX_use_PrivateKey(self._context, pkey._pkey)\n if not use_result:\n self._raise_passphrase_exception()\n\n\n def check_privatekey(self):\n \"\"\"\n Check that the private key and certificate match up\n\n :return: None (raises an exception if something's wrong)\n \"\"\"\n\n def load_client_ca(self, cafile):\n \"\"\"\n Load the trusted certificates that will be sent to the client (basically\n telling the client \"These are the guys I trust\"). Does not actually\n imply any of the certificates are trusted; that must be configured\n separately.\n\n :param cafile: The name of the certificates file\n :return: None\n \"\"\"\n\n def set_session_id(self, buf):\n \"\"\"\n Set the session identifier. 
This is needed if you want to do session\n resumption.\n\n :param buf: A Python object that can be safely converted to a string\n :returns: None\n \"\"\"\n\n def set_session_cache_mode(self, mode):\n \"\"\"\n Enable/disable session caching and specify the mode used.\n\n :param mode: One or more of the SESS_CACHE_* flags (combine using\n bitwise or)\n :returns: The previously set caching mode.\n \"\"\"\n if not isinstance(mode, integer_types):\n raise TypeError(\"mode must be an integer\")\n\n return _lib.SSL_CTX_set_session_cache_mode(self._context, mode)\n\n\n def get_session_cache_mode(self):\n \"\"\"\n :returns: The currently used cache mode.\n \"\"\"\n return _lib.SSL_CTX_get_session_cache_mode(self._context)\n\n\n def set_verify(self, mode, callback):\n \"\"\"\n Set the verify mode and verify callback\n\n :param mode: The verify mode, this is either VERIFY_NONE or\n VERIFY_PEER combined with possible other flags\n :param callback: The Python callback to use\n :return: None\n\n See SSL_CTX_set_verify(3SSL) for further details.\n \"\"\"\n if not isinstance(mode, integer_types):\n raise TypeError(\"mode must be an integer\")\n\n if not callable(callback):\n raise TypeError(\"callback must be callable\")\n\n self._verify_helper = _VerifyHelper(self, callback)\n self._verify_callback = self._verify_helper.callback\n _lib.SSL_CTX_set_verify(self._context, mode, self._verify_callback)\n\n\n def set_verify_depth(self, depth):\n \"\"\"\n Set the verify depth\n\n :param depth: An integer specifying the verify depth\n :return: None\n \"\"\"\n if not isinstance(depth, integer_types):\n raise TypeError(\"depth must be an integer\")\n\n _lib.SSL_CTX_set_verify_depth(self._context, depth)\n\n\n def get_verify_mode(self):\n \"\"\"\n Get the verify mode\n\n :return: The verify mode\n \"\"\"\n return _lib.SSL_CTX_get_verify_mode(self._context)\n\n\n def get_verify_depth(self):\n \"\"\"\n Get the verify depth\n\n :return: The verify depth\n \"\"\"\n return _lib.SSL_CTX_get_verify_depth(self._context)\n\n\n def load_tmp_dh(self, dhfile):\n \"\"\"\n Load parameters for Ephemeral Diffie-Hellman\n\n :param dhfile: The file to load EDH parameters from\n :return: None\n \"\"\"\n if not isinstance(dhfile, bytes):\n raise TypeError(\"dhfile must be a byte string\")\n\n bio = _lib.BIO_new_file(dhfile, b\"r\")\n if bio == _ffi.NULL:\n _raise_current_error()\n bio = _ffi.gc(bio, _lib.BIO_free)\n\n dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)\n dh = _ffi.gc(dh, _lib.DH_free)\n _lib.SSL_CTX_set_tmp_dh(self._context, dh)\n\n\n def set_cipher_list(self, cipher_list):\n \"\"\"\n Change the cipher list\n\n :param cipher_list: A cipher list, see ciphers(1)\n :return: None\n \"\"\"\n if isinstance(cipher_list, _text_type):\n cipher_list = cipher_list.encode(\"ascii\")\n\n if not isinstance(cipher_list, bytes):\n raise TypeError(\"cipher_list must be bytes or unicode\")\n\n result = _lib.SSL_CTX_set_cipher_list(self._context, cipher_list)\n if not result:\n _raise_current_error()\n\n\n def set_client_ca_list(self, certificate_authorities):\n \"\"\"\n Set the list of preferred client certificate signers for this server context.\n\n This list of certificate authorities will be sent to the client when the\n server requests a client certificate.\n\n :param certificate_authorities: a sequence of X509Names.\n :return: None\n \"\"\"\n name_stack = _lib.sk_X509_NAME_new_null()\n if name_stack == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n\n try:\n for ca_name in 
certificate_authorities:\n if not isinstance(ca_name, X509Name):\n raise TypeError(\n \"client CAs must be X509Name objects, not %s objects\" % (\n type(ca_name).__name__,))\n copy = _lib.X509_NAME_dup(ca_name._name)\n if copy == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n push_result = _lib.sk_X509_NAME_push(name_stack, copy)\n if not push_result:\n _lib.X509_NAME_free(copy)\n _raise_current_error()\n except:\n _lib.sk_X509_NAME_free(name_stack)\n raise\n\n _lib.SSL_CTX_set_client_CA_list(self._context, name_stack)\n\n\n def add_client_ca(self, certificate_authority):\n \"\"\"\n Add the CA certificate to the list of preferred signers for this context.\n\n The list of certificate authorities will be sent to the client when the\n server requests a client certificate.\n\n :param certificate_authority: certificate authority's X509 certificate.\n :return: None\n \"\"\"\n if not isinstance(certificate_authority, X509):\n raise TypeError(\"certificate_authority must be an X509 instance\")\n\n add_result = _lib.SSL_CTX_add_client_CA(\n self._context, certificate_authority._x509)\n if not add_result:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def set_timeout(self, timeout):\n \"\"\"\n Set session timeout\n\n :param timeout: The timeout in seconds\n :return: The previous session timeout\n \"\"\"\n if not isinstance(timeout, integer_types):\n raise TypeError(\"timeout must be an integer\")\n\n return _lib.SSL_CTX_set_timeout(self._context, timeout)\n\n\n def get_timeout(self):\n \"\"\"\n Get the session timeout\n\n :return: The session timeout\n \"\"\"\n return _lib.SSL_CTX_get_timeout(self._context)\n\n\n def set_info_callback(self, callback):\n \"\"\"\n Set the info callback\n\n :param callback: The Python callback to use\n :return: None\n \"\"\"\n @wraps(callback)\n def wrapper(ssl, where, return_code):\n callback(Connection._reverse_mapping[ssl], where, return_code)\n self._info_callback = _ffi.callback(\n \"void (*)(const SSL *, int, int)\", wrapper)\n _lib.SSL_CTX_set_info_callback(self._context, self._info_callback)\n\n\n def get_app_data(self):\n \"\"\"\n Get the application data (supplied via set_app_data())\n\n :return: The application data\n \"\"\"\n return self._app_data\n\n\n def set_app_data(self, data):\n \"\"\"\n Set the application data (will be returned from get_app_data())\n\n :param data: Any Python object\n :return: None\n \"\"\"\n self._app_data = data\n\n\n def get_cert_store(self):\n \"\"\"\n Get the certificate store for the context.\n\n :return: A X509Store object or None if it does not have one.\n \"\"\"\n store = _lib.SSL_CTX_get_cert_store(self._context)\n if store == _ffi.NULL:\n # TODO: This is untested.\n return None\n\n pystore = X509Store.__new__(X509Store)\n pystore._store = store\n return pystore\n\n\n def set_options(self, options):\n \"\"\"\n Add options. Options set before are not cleared!\n\n :param options: The options to add.\n :return: The new option bitmask.\n \"\"\"\n if not isinstance(options, integer_types):\n raise TypeError(\"options must be an integer\")\n\n return _lib.SSL_CTX_set_options(self._context, options)\n\n\n def set_mode(self, mode):\n \"\"\"\n Add modes via bitmask. 
Modes set before are not cleared!\n\n :param mode: The mode to add.\n :return: The new mode bitmask.\n \"\"\"\n if not isinstance(mode, integer_types):\n raise TypeError(\"mode must be an integer\")\n\n return _lib.SSL_CTX_set_mode(self._context, mode)\n\n\n def set_tlsext_servername_callback(self, callback):\n \"\"\"\n Specify a callback function to be called when clients specify a server name.\n\n :param callback: The callback function. It will be invoked with one\n argument, the Connection instance.\n \"\"\"\n @wraps(callback)\n def wrapper(ssl, alert, arg):\n callback(Connection._reverse_mapping[ssl])\n return 0\n\n self._tlsext_servername_callback = _ffi.callback(\n \"int (*)(const SSL *, int *, void *)\", wrapper)\n _lib.SSL_CTX_set_tlsext_servername_callback(\n self._context, self._tlsext_servername_callback)\n\nContextType = Context\n\n\n\nclass Connection(object):\n \"\"\"\n \"\"\"\n _reverse_mapping = WeakValueDictionary()\n\n def __init__(self, context, socket=None):\n \"\"\"\n Create a new Connection object, using the given OpenSSL.SSL.Context\n instance and socket.\n\n :param context: An SSL Context to use for this connection\n :param socket: The socket to use for transport layer\n \"\"\"\n if not isinstance(context, Context):\n raise TypeError(\"context must be a Context instance\")\n\n ssl = _lib.SSL_new(context._context)\n self._ssl = _ffi.gc(ssl, _lib.SSL_free)\n self._context = context\n\n self._reverse_mapping[self._ssl] = self\n\n if socket is None:\n self._socket = None\n # Don't set up any gc for these, SSL_free will take care of them.\n self._into_ssl = _lib.BIO_new(_lib.BIO_s_mem())\n self._from_ssl = _lib.BIO_new(_lib.BIO_s_mem())\n\n if self._into_ssl == _ffi.NULL or self._from_ssl == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n\n _lib.SSL_set_bio(self._ssl, self._into_ssl, self._from_ssl)\n else:\n self._into_ssl = None\n self._from_ssl = None\n self._socket = socket\n set_result = _lib.SSL_set_fd(self._ssl, _asFileDescriptor(self._socket))\n if not set_result:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def __getattr__(self, name):\n \"\"\"\n Look up attributes on the wrapped socket object if they are not found on\n the Connection object.\n \"\"\"\n return getattr(self._socket, name)\n\n\n def _raise_ssl_error(self, ssl, result):\n if self._context._verify_helper is not None:\n self._context._verify_helper.raise_if_problem()\n\n error = _lib.SSL_get_error(ssl, result)\n if error == _lib.SSL_ERROR_WANT_READ:\n raise WantReadError()\n elif error == _lib.SSL_ERROR_WANT_WRITE:\n raise WantWriteError()\n elif error == _lib.SSL_ERROR_ZERO_RETURN:\n raise ZeroReturnError()\n elif error == _lib.SSL_ERROR_WANT_X509_LOOKUP:\n # TODO: This is untested.\n raise WantX509LookupError()\n elif error == _lib.SSL_ERROR_SYSCALL:\n if _lib.ERR_peek_error() == 0:\n if result < 0:\n if platform == \"win32\":\n errno = _ffi.getwinerror()[0]\n else:\n errno = _ffi.errno\n raise SysCallError(errno, errorcode[errno])\n else:\n raise SysCallError(-1, \"Unexpected EOF\")\n else:\n # TODO: This is untested.\n _raise_current_error()\n elif error == _lib.SSL_ERROR_NONE:\n pass\n else:\n _raise_current_error()\n\n\n def get_context(self):\n \"\"\"\n Get session context\n \"\"\"\n return self._context\n\n\n def set_context(self, context):\n \"\"\"\n Switch this connection to a new session context\n\n :param context: A :py:class:`Context` instance giving the new session\n context to use.\n \"\"\"\n if not isinstance(context, Context):\n raise 
TypeError(\"context must be a Context instance\")\n\n _lib.SSL_set_SSL_CTX(self._ssl, context._context)\n self._context = context\n\n\n def get_servername(self):\n \"\"\"\n Retrieve the servername extension value if provided in the client hello\n message, or None if there wasn't one.\n\n :return: A byte string giving the server name or :py:data:`None`.\n \"\"\"\n name = _lib.SSL_get_servername(self._ssl, _lib.TLSEXT_NAMETYPE_host_name)\n if name == _ffi.NULL:\n return None\n\n return _ffi.string(name)\n\n\n def set_tlsext_host_name(self, name):\n \"\"\"\n Set the value of the servername extension to send in the client hello.\n\n :param name: A byte string giving the name.\n \"\"\"\n if not isinstance(name, bytes):\n raise TypeError(\"name must be a byte string\")\n elif b\"\\0\" in name:\n raise TypeError(\"name must not contain NUL byte\")\n\n # XXX I guess this can fail sometimes?\n _lib.SSL_set_tlsext_host_name(self._ssl, name)\n\n\n def pending(self):\n \"\"\"\n Get the number of bytes that can be safely read from the connection\n\n :return: The number of bytes available in the receive buffer.\n \"\"\"\n return _lib.SSL_pending(self._ssl)\n\n\n def send(self, buf, flags=0):\n \"\"\"\n Send data on the connection. NOTE: If you get one of the WantRead,\n WantWrite or WantX509Lookup exceptions on this, you have to call the\n method again with the SAME buffer.\n\n :param buf: The string to send\n :param flags: (optional) Included for compatibility with the socket\n API, the value is ignored\n :return: The number of bytes written\n \"\"\"\n if isinstance(buf, _memoryview):\n buf = buf.tobytes()\n if not isinstance(buf, bytes):\n raise TypeError(\"data must be a byte string\")\n\n result = _lib.SSL_write(self._ssl, buf, len(buf))\n self._raise_ssl_error(self._ssl, result)\n return result\n write = send\n\n\n def sendall(self, buf, flags=0):\n \"\"\"\n Send \"all\" data on the connection. This calls send() repeatedly until\n all data is sent. If an error occurs, it's impossible to tell how much\n data has been sent.\n\n :param buf: The string to send\n :param flags: (optional) Included for compatibility with the socket\n API, the value is ignored\n :return: The number of bytes written\n \"\"\"\n if isinstance(buf, _memoryview):\n buf = buf.tobytes()\n if not isinstance(buf, bytes):\n raise TypeError(\"buf must be a byte string\")\n\n left_to_send = len(buf)\n total_sent = 0\n data = _ffi.new(\"char[]\", buf)\n\n while left_to_send:\n result = _lib.SSL_write(self._ssl, data + total_sent, left_to_send)\n self._raise_ssl_error(self._ssl, result)\n total_sent += result\n left_to_send -= result\n\n\n def recv(self, bufsiz, flags=None):\n \"\"\"\n Receive data on the connection. NOTE: If you get one of the WantRead,\n WantWrite or WantX509Lookup exceptions on this, you have to call the\n method again with the SAME buffer.\n\n :param bufsiz: The maximum number of bytes to read\n :param flags: (optional) Included for compatibility with the socket\n API, the value is ignored\n :return: The string read from the Connection\n \"\"\"\n buf = _ffi.new(\"char[]\", bufsiz)\n result = _lib.SSL_read(self._ssl, buf, bufsiz)\n self._raise_ssl_error(self._ssl, result)\n return _ffi.buffer(buf, result)[:]\n read = recv\n\n\n def _handle_bio_errors(self, bio, result):\n if _lib.BIO_should_retry(bio):\n if _lib.BIO_should_read(bio):\n raise WantReadError()\n elif _lib.BIO_should_write(bio):\n # TODO: This is untested.\n raise WantWriteError()\n elif _lib.BIO_should_io_special(bio):\n # TODO: This is untested. 
I think io_special means the socket\n # BIO has a not-yet connected socket.\n raise ValueError(\"BIO_should_io_special\")\n else:\n # TODO: This is untested.\n raise ValueError(\"unknown bio failure\")\n else:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def bio_read(self, bufsiz):\n \"\"\"\n When using non-socket connections this function reads the \"dirty\" data\n that would have traveled away on the network.\n\n :param bufsiz: The maximum number of bytes to read\n :return: The string read.\n \"\"\"\n if self._from_ssl is None:\n raise TypeError(\"Connection sock was not None\")\n\n if not isinstance(bufsiz, integer_types):\n raise TypeError(\"bufsiz must be an integer\")\n\n buf = _ffi.new(\"char[]\", bufsiz)\n result = _lib.BIO_read(self._from_ssl, buf, bufsiz)\n if result <= 0:\n self._handle_bio_errors(self._from_ssl, result)\n\n return _ffi.buffer(buf, result)[:]\n\n\n def bio_write(self, buf):\n \"\"\"\n When using non-socket connections this function sends \"dirty\" data that\n would have traveled in on the network.\n\n :param buf: The string to put into the memory BIO.\n :return: The number of bytes written\n \"\"\"\n if self._into_ssl is None:\n raise TypeError(\"Connection sock was not None\")\n\n if not isinstance(buf, bytes):\n raise TypeError(\"buf must be a byte string\")\n\n result = _lib.BIO_write(self._into_ssl, buf, len(buf))\n if result <= 0:\n self._handle_bio_errors(self._into_ssl, result)\n return result\n\n\n def renegotiate(self):\n \"\"\"\n Renegotiate the session\n\n :return: True if the renegotiation can be started, false otherwise\n \"\"\"\n\n def do_handshake(self):\n \"\"\"\n Perform an SSL handshake (usually called after renegotiate() or one of\n set_*_state()). This can raise the same exceptions as send and recv.\n\n :return: None.\n \"\"\"\n result = _lib.SSL_do_handshake(self._ssl)\n self._raise_ssl_error(self._ssl, result)\n\n\n def renegotiate_pending(self):\n \"\"\"\n Check if there's a renegotiation in progress, it will return false once\n a renegotiation is finished.\n\n :return: Whether there's a renegotiation in progress\n \"\"\"\n\n def total_renegotiations(self):\n \"\"\"\n Find out the total number of renegotiations.\n\n :return: The number of renegotiations.\n \"\"\"\n return _lib.SSL_total_renegotiations(self._ssl)\n\n\n def connect(self, addr):\n \"\"\"\n Connect to remote host and set up client-side SSL\n\n :param addr: A remote address\n :return: What the socket's connect method returns\n \"\"\"\n _lib.SSL_set_connect_state(self._ssl)\n return self._socket.connect(addr)\n\n\n def connect_ex(self, addr):\n \"\"\"\n Connect to remote host and set up client-side SSL. 
Note that if the socket's\n connect_ex method doesn't return 0, SSL won't be initialized.\n\n :param addr: A remove address\n :return: What the socket's connect_ex method returns\n \"\"\"\n connect_ex = self._socket.connect_ex\n self.set_connect_state()\n return connect_ex(addr)\n\n\n def accept(self):\n \"\"\"\n Accept incoming connection and set up SSL on it\n\n :return: A (conn,addr) pair where conn is a Connection and addr is an\n address\n \"\"\"\n client, addr = self._socket.accept()\n conn = Connection(self._context, client)\n conn.set_accept_state()\n return (conn, addr)\n\n\n def bio_shutdown(self):\n \"\"\"\n When using non-socket connections this function signals end of\n data on the input for this connection.\n\n :return: None\n \"\"\"\n if self._from_ssl is None:\n raise TypeError(\"Connection sock was not None\")\n\n _lib.BIO_set_mem_eof_return(self._into_ssl, 0)\n\n\n def shutdown(self):\n \"\"\"\n Send closure alert\n\n :return: True if the shutdown completed successfully (i.e. both sides\n have sent closure alerts), false otherwise (i.e. you have to\n wait for a ZeroReturnError on a recv() method call\n \"\"\"\n result = _lib.SSL_shutdown(self._ssl)\n if result < 0:\n # TODO: This is untested.\n _raise_current_error()\n elif result > 0:\n return True\n else:\n return False\n\n\n def get_cipher_list(self):\n \"\"\"\n Get the session cipher list\n\n :return: A list of cipher strings\n \"\"\"\n ciphers = []\n for i in count():\n result = _lib.SSL_get_cipher_list(self._ssl, i)\n if result == _ffi.NULL:\n break\n ciphers.append(_native(_ffi.string(result)))\n return ciphers\n\n\n def get_client_ca_list(self):\n \"\"\"\n Get CAs whose certificates are suggested for client authentication.\n\n :return: If this is a server connection, a list of X509Names representing\n the acceptable CAs as set by :py:meth:`OpenSSL.SSL.Context.set_client_ca_list` or\n :py:meth:`OpenSSL.SSL.Context.add_client_ca`. 
If this is a client connection,\n the list of such X509Names sent by the server, or an empty list if that\n has not yet happened.\n \"\"\"\n ca_names = _lib.SSL_get_client_CA_list(self._ssl)\n if ca_names == _ffi.NULL:\n # TODO: This is untested.\n return []\n\n result = []\n for i in range(_lib.sk_X509_NAME_num(ca_names)):\n name = _lib.sk_X509_NAME_value(ca_names, i)\n copy = _lib.X509_NAME_dup(name)\n if copy == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n\n pyname = X509Name.__new__(X509Name)\n pyname._name = _ffi.gc(copy, _lib.X509_NAME_free)\n result.append(pyname)\n return result\n\n\n def makefile(self):\n \"\"\"\n The makefile() method is not implemented, since there is no dup semantics\n for SSL connections\n\n :raise NotImplementedError\n \"\"\"\n raise NotImplementedError(\"Cannot make file object of OpenSSL.SSL.Connection\")\n\n\n def get_app_data(self):\n \"\"\"\n Get application data\n\n :return: The application data\n \"\"\"\n return self._app_data\n\n\n def set_app_data(self, data):\n \"\"\"\n Set application data\n\n :param data - The application data\n :return: None\n \"\"\"\n self._app_data = data\n\n\n def get_shutdown(self):\n \"\"\"\n Get shutdown state\n\n :return: The shutdown state, a bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.\n \"\"\"\n return _lib.SSL_get_shutdown(self._ssl)\n\n\n def set_shutdown(self, state):\n \"\"\"\n Set shutdown state\n\n :param state - bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.\n :return: None\n \"\"\"\n if not isinstance(state, integer_types):\n raise TypeError(\"state must be an integer\")\n\n _lib.SSL_set_shutdown(self._ssl, state)\n\n\n def state_string(self):\n \"\"\"\n Get a verbose state description\n\n :return: A string representing the state\n \"\"\"\n\n def server_random(self):\n \"\"\"\n Get a copy of the server hello nonce.\n\n :return: A string representing the state\n \"\"\"\n if self._ssl.session == _ffi.NULL:\n return None\n return _ffi.buffer(\n self._ssl.s3.server_random,\n _lib.SSL3_RANDOM_SIZE)[:]\n\n\n def client_random(self):\n \"\"\"\n Get a copy of the client hello nonce.\n\n :return: A string representing the state\n \"\"\"\n if self._ssl.session == _ffi.NULL:\n return None\n return _ffi.buffer(\n self._ssl.s3.client_random,\n _lib.SSL3_RANDOM_SIZE)[:]\n\n\n def master_key(self):\n \"\"\"\n Get a copy of the master key.\n\n :return: A string representing the state\n \"\"\"\n if self._ssl.session == _ffi.NULL:\n return None\n return _ffi.buffer(\n self._ssl.session.master_key,\n self._ssl.session.master_key_length)[:]\n\n\n def sock_shutdown(self, *args, **kwargs):\n \"\"\"\n See shutdown(2)\n\n :return: What the socket's shutdown() method returns\n \"\"\"\n return self._socket.shutdown(*args, **kwargs)\n\n\n def get_peer_certificate(self):\n \"\"\"\n Retrieve the other side's certificate (if any)\n\n :return: The peer's certificate\n \"\"\"\n cert = _lib.SSL_get_peer_certificate(self._ssl)\n if cert != _ffi.NULL:\n pycert = X509.__new__(X509)\n pycert._x509 = _ffi.gc(cert, _lib.X509_free)\n return pycert\n return None\n\n\n def get_peer_cert_chain(self):\n \"\"\"\n Retrieve the other side's certificate (if any)\n\n :return: A list of X509 instances giving the peer's certificate chain,\n or None if it does not have one.\n \"\"\"\n cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl)\n if cert_stack == _ffi.NULL:\n return None\n\n result = []\n for i in range(_lib.sk_X509_num(cert_stack)):\n # TODO could incref instead of dup here\n cert = _lib.X509_dup(_lib.sk_X509_value(cert_stack, 
i))\n pycert = X509.__new__(X509)\n pycert._x509 = _ffi.gc(cert, _lib.X509_free)\n result.append(pycert)\n return result\n\n\n def want_read(self):\n \"\"\"\n Checks if more data has to be read from the transport layer to complete an\n operation.\n\n :return: True iff more data has to be read\n \"\"\"\n return _lib.SSL_want_read(self._ssl)\n\n\n def want_write(self):\n \"\"\"\n Checks if there is data to write to the transport layer to complete an\n operation.\n\n :return: True iff there is data to write\n \"\"\"\n return _lib.SSL_want_write(self._ssl)\n\n\n def set_accept_state(self):\n \"\"\"\n Set the connection to work in server mode. The handshake will be handled\n automatically by read/write.\n\n :return: None\n \"\"\"\n _lib.SSL_set_accept_state(self._ssl)\n\n\n def set_connect_state(self):\n \"\"\"\n Set the connection to work in client mode. The handshake will be handled\n automatically by read/write.\n\n :return: None\n \"\"\"\n _lib.SSL_set_connect_state(self._ssl)\n\n\n def get_session(self):\n \"\"\"\n Returns the Session currently used.\n\n @return: An instance of :py:class:`OpenSSL.SSL.Session` or :py:obj:`None` if\n no session exists.\n \"\"\"\n session = _lib.SSL_get1_session(self._ssl)\n if session == _ffi.NULL:\n return None\n\n pysession = Session.__new__(Session)\n pysession._session = _ffi.gc(session, _lib.SSL_SESSION_free)\n return pysession\n\n\n def set_session(self, session):\n \"\"\"\n Set the session to be used when the TLS/SSL connection is established.\n\n :param session: A Session instance representing the session to use.\n :returns: None\n \"\"\"\n if not isinstance(session, Session):\n raise TypeError(\"session must be a Session instance\")\n\n result = _lib.SSL_set_session(self._ssl, session._session)\n if not result:\n _raise_current_error()\n\nConnectionType = Connection\n\n# This is similar to the initialization calls at the end of OpenSSL/crypto.py\n# but is exercised mostly by the Context initializer.\n_lib.SSL_library_init()\n","repo_name":"sorig/moodle-scraper","sub_path":"venv/lib/python2.7/site-packages/OpenSSL/SSL.py","file_name":"SSL.py","file_ext":"py","file_size_in_byte":43264,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"} +{"seq_id":"19536288437","text":"from tqdm import tqdm\nfrom subprocess import Popen\nimport os\nimport sys\nimport logging\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"createDataset\")\nlogger.setLevel(logging.INFO)\n\n\ndef createDataset(datasetPath):\n numberOfMatchesPerConfiguration = 10\n contestantAIs = [\"MctsAi\", \"JerryMizunoAI\", \"LoadTorchWeightAI\", \"RandomAI\"]\n availableCharacters = [\"ZEN\", \"GARNET\", \"LUD\"]\n positions = [\"player 1\", \"player 2\"]\n\n datasetPath += 'dataset/'\n if not os.path.isdir(datasetPath):\n os.mkdir(datasetPath)\n createTemporaryFileWithDatasetLocation(datasetPath)\n\n try:\n for contestant in tqdm(contestantAIs):\n for position in positions:\n for character1 in availableCharacters:\n for character2 in availableCharacters:\n prepareTemporaryFile(datasetPath, contestant, character1, character2, position)\n playMatch(contestant, character1, character2, position, numberOfGames=numberOfMatchesPerConfiguration)\n except KeyboardInterrupt:\n os.remove('.datasetPath.yaml')\n logger.info(\"DATASET CREATION STOPPED BY USER\")\n\n\ndef createTemporaryFileWithDatasetLocation(datasetPath):\n with open('.datasetPath.yaml', 'w') as f:\n f.write('datasetPath: ' + datasetPath)\n\n\ndef prepareTemporaryFile(datasetPath, 
contestant, character1, character2, playerPosition):\n    content = 'contestantAI: {}\\ncharacter1: {}\\ncharacter2: {}\\n'.format(contestant, character1, character2)\n    with open(datasetPath + \".temp_match_info.yaml\", \"w+\") as f:\n        f.write(content)\n        f.truncate()\n\n\ndef playMatch(contestant, character1, character2, position, numberOfGames):\n    classpath = \"bin:lib/logback/:lib/slf4j-1.7.25/slf4j-api-1.7.25.jar:lib/slf4j-1.7.25/slf4j-simple-1.7.25.jar:lib/snakeyaml-1.17.jar:lib/natives/linux/lwjgl-glfw-natives-linux.jar:data/aiData/:lib/natives/linux/lwjgl-natives-linux.jar:lib/natives/linux/lwjgl-openal-natives-linux.jar:lib/natives/linux/lwjgl-opengl-natives-linux.jar:FightingICE.jar:lib/lwjgl/lwjgl_util.jar:lib/lwjgl/lwjgl-glfw.jar:lib/lwjgl/lwjgl-openal.jar:lib/lwjgl/lwjgl-opengl.jar:lib/lwjgl/lwjgl.jar:lib/javax.json-1.0.4.jar:lib/py4j0.10.4.jar\"\n    mainClass = \"Main\"\n\n    numberOfMatches = '-n {}'.format(numberOfGames)\n    matchContestants = '--a1 DatasetCreator --a2 {}'.format(contestant) if position == \"player 1\" else '--a1 {} --a2 DatasetCreator'.format(contestant)\n    playableCharacters = '--c1 {} --c2 {}'.format(character1, character2)\n    otherFlags = '--grey-bg --inverted-player 1 --mute'\n    flags = (numberOfMatches + ' ' + matchContestants + ' ' + playableCharacters + ' ' + otherFlags).split(' ')\n\n    p = Popen([\"java\", \"-classpath\", classpath, mainClass] + flags)\n    p.wait()\n\n\nif __name__ == '__main__':\n    if len(sys.argv) != 2:\n        print('\\n')\n        logger.error(\"The script takes exactly 1 argument, which represents the path where the dataset will be created\")\n        print('\\n')\n    else:\n        datasetPath = str(sys.argv[1])\n        createDataset(datasetPath)\n","repo_name":"Danielhp95/CIG-FightingICE-2018-entry","sub_path":"FTG4.30/createDataset.py","file_name":"createDataset.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"41506508358","text":"\n\ndef frac_knapsack(arr: list, weight):\n    arr.sort(key=lambda x: x[1]/x[0], reverse=True)\n    res = 0.0\n    for item in arr:\n        if item[0] <= weight:\n            res += item[1]\n            weight -= item[0]\n        else:\n            res += item[1] * weight/item[0]\n            break\n\n    return res\n\n\nprint(frac_knapsack([(10,60), (40,40), (20,100), (30,120)], 50))\n","repo_name":"anmol211/TrailStuff","sub_path":"Greedy/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"25569331664","text":"# Definition for a binary tree node\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    # @param root, a tree node\n    # @return a list of lists of integers\n    result = []\n    def zigzagLevelOrder(self, root):\n        if root is None:\n            return []\n        if root.left is None and root.right is None:\n            return [[root.val]]\n        self.result = []\n        self.result.append([root.val])\n        self.helper([root])\n        return self.result\n\n    def helper(self, treeArray):\n        tempResult = []\n        tempTreeArray = []\n        for i in treeArray:\n            if i.left is not None:\n                tempResult.append(i.left.val)\n                tempTreeArray.append(i.left)\n            if i.right is not None:\n                tempResult.append(i.right.val)\n                tempTreeArray.append(i.right)\n\n        if not tempResult:\n            return\n        if len(self.result) % 2 == 1:\n            tempResult.reverse()\n        self.result.append(tempResult)\n        self.helper(tempTreeArray)
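\n# Quick illustrative check (hypothetical, not in the original file):\n#   t = TreeNode(3); t.left = TreeNode(9); t.right = TreeNode(20)\n#   t.right.left = TreeNode(15); t.right.right = TreeNode(7)\n#   Solution().zigzagLevelOrder(t)  # -> [[3], [20, 9], [15, 7]]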
\n","repo_name":"jinmingmu/codeingInterview","sub_path":"Binary_Tree_Zigzag_Level_Order_Traversal.py","file_name":"Binary_Tree_Zigzag_Level_Order_Traversal.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"15161296649","text":"from flask import Flask, render_template, request\nimport pickle\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n    return render_template('index.html')\n\n@app.route(\"/form\")\ndef form():\n    return render_template('form.html')\n\n@app.route('/result', methods=['POST'])\ndef insurance_cost():\n    age = int(request.form['age'])\n    bmi = float(request.form['bmi'])\n    children = int(request.form['no_of_children'])\n    # The form is assumed to post numeric codes for these fields; without the\n    # int() casts the string values would break both the region comparisons\n    # below and the model input.\n    sex = int(request.form['gender'])\n    smoker = int(request.form['smoker'])\n    region = int(request.form['region'])\n    r1 = 0\n    r2 = 0\n    r3 = 0\n    if region == 1:\n        r1, r2, r3 = 1, 0, 0\n    elif region == 2:\n        r1, r2, r3 = 0, 0, 0\n    elif region == 3:\n        r1, r2, r3 = 0, 1, 0\n    elif region == 4:\n        r1, r2, r3 = 0, 0, 1\n\n    filename = 'model.sav'\n    loaded_model = pickle.load(open(filename, 'rb'))\n    result = round(float(loaded_model.predict([[age, bmi, children, sex, smoker, r1, r2, r3]])), 2)\n    return render_template('result.html', result=result)\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"anushavc/Insurance-Cost-Predictor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"39430726816","text":"import xbmc\nimport xbmcgui\nimport xbmcvfs\nimport datetime\nimport resources.lib.utils as utils\nfrom json import loads\nfrom string import Formatter\nfrom collections import defaultdict\nfrom resources.lib.plugin import Plugin\nfrom resources.lib.kodilibrary import KodiLibrary\nfrom resources.lib.traktapi import TraktAPI\nfrom resources.lib.listitem import ListItem\ntry:\n    from urllib.parse import quote_plus  # Py3\nexcept ImportError:\n    from urllib import quote_plus  # Py2\n\n\ndef string_format_map(fmt, d):\n    try:\n        str.format_map\n    except AttributeError:\n        parts = Formatter().parse(fmt)\n        return fmt.format(**{part[1]: d[part[1]] for part in parts})\n    else:\n        return fmt.format(**d)\n\n\nclass Player(Plugin):\n    def __init__(self):\n        super(Player, self).__init__()\n        self.traktapi = TraktAPI()\n        self.search_movie, self.search_episode, self.play_movie, self.play_episode = [], [], [], []\n        self.item = defaultdict(lambda: '+')\n        self.itemlist, self.actions, self.players, self.identifierlist = [], [], {}, []\n        self.is_local = None\n        self.dp_movies = self.addon.getSettingString('default_player_movies')\n        self.dp_episodes = self.addon.getSettingString('default_player_episodes')\n        self.dp_movies_id = None\n        self.dp_episodes_id = None\n        self.fallbacks = {}\n\n    def setup_players(self, tmdbtype=None, details=False, clearsetting=False, assertplayers=True):\n        self.build_players(tmdbtype)\n        if details:\n            self.build_details()\n        self.build_selectbox(clearsetting, assertplayers)\n\n    def get_fallback(self, dp_file, dp_action):\n        fallback = self.players.get(dp_file, {}).get('fallback', {}).get(dp_action)\n        if not fallback:  # No fallback so prompt dialog\n            return xbmcgui.Dialog().select(self.addon.getLocalizedString(32042), self.itemlist)\n        if fallback in self.identifierlist:  # Found a fallback in list so play that\n            return self.identifierlist.index(fallback)\n        fb_file, fb_action = fallback.split()\n        return self.get_fallback(fb_file, fb_action)  # Fallback not in list so 
let's check fallback's fallback\n\n def get_playerindex(self, force_dialog=False):\n if force_dialog or (self.itemtype == 'movie' and not self.dp_movies) or (self.itemtype == 'episode' and not self.dp_episodes):\n idx = xbmcgui.Dialog().select(self.addon.getLocalizedString(32042), self.itemlist) # Ask user to select player\n if self.itemtype == 'movie':\n self.dp_movies = self.itemlist[idx].getLabel()\n self.dp_movies_id = self.identifierlist[idx]\n elif self.itemtype == 'episode':\n self.dp_episodes = self.itemlist[idx].getLabel()\n self.dp_episodes_id = self.identifierlist[idx]\n return idx\n\n for i in range(0, len(self.itemlist)):\n label = self.itemlist[i].getLabel()\n if (\n (label == self.dp_movies and self.itemtype == 'movie') or\n (label == self.dp_episodes and self.itemtype == 'episode') or\n (label == u'{0} {1}'.format(self.addon.getLocalizedString(32061), 'Kodi'))):\n return i # Play local or with default player if found\n\n # Check for fallbacks\n if self.itemtype == 'movie' and self.dp_movies_id:\n dp_file, dp_action = self.dp_movies_id.split()\n return self.get_fallback(dp_file, dp_action)\n if self.itemtype == 'episode' and self.dp_episodes_id:\n dp_file, dp_action = self.dp_episodes_id.split()\n return self.get_fallback(dp_file, dp_action)\n\n return -1\n\n def play_external(self, force_dialog=False, playerindex=-1):\n if playerindex > -1: # Previous iteration didn't find an item to play so remove it and retry\n xbmcgui.Dialog().notification(self.itemlist[playerindex].getLabel(), self.addon.getLocalizedString(32040))\n del self.actions[playerindex] # Item not found so remove the player's action list\n del self.itemlist[playerindex] # Item not found so remove the player's select dialog entry\n del self.identifierlist[playerindex] # Item not found so remove the player's index\n\n playerindex = self.get_playerindex(force_dialog=force_dialog)\n\n # User cancelled dialog\n if not playerindex > -1:\n return False\n\n player = self.actions[playerindex]\n if not player or not player[1]:\n return False\n\n # External player has list of actions so let's iterate through them to find our item\n resolve_url = False\n if isinstance(player[1], list):\n actionlist = player[1]\n player = (False, actionlist[0])\n for d in actionlist[1:]:\n if player[0]:\n break # Playable item was found in last action so let's break and play it\n folder = KodiLibrary().get_directory(string_format_map(player[1], self.item)) # Get the next folder from the plugin\n\n if d.get('dialog'): # Special option to show dialog of items to select from\n d_items = []\n for f in folder: # Create our list of items\n if not f.get('label') or f.get('label') == 'None':\n continue\n lb_list = []\n label_a = f.get('label')\n if f.get('year') and f.get('year') != 1601:\n label_a = u'{} ({})'.format(label_a, f.get('year'))\n if utils.try_parse_int(f.get('season', 0)) > 0 and utils.try_parse_int(f.get('episode', 0)) > 0:\n label_a = u'{}x{}. 
{}'.format(f.get('season'), f.get('episode'), label_a)\n if f.get('streamdetails'):\n sdv_list = f.get('streamdetails', {}).get('video', [{}]) or [{}]\n sda_list = f.get('streamdetails', {}).get('audio', [{}]) or [{}]\n sdv, sda = sdv_list[0], sda_list[0]\n if sdv.get('width') or sdv.get('height'):\n lb_list.append(u'{}x{}'.format(sdv.get('width'), sdv.get('height')))\n if sdv.get('codec'):\n lb_list.append(u'{}'.format(sdv.get('codec', '').upper()))\n if sda.get('codec'):\n lb_list.append(u'{}'.format(sda.get('codec', '').upper()))\n if sda.get('channels'):\n lb_list.append(u'{} CH'.format(sda.get('channels', '')))\n for i in sda_list:\n if i.get('language'):\n lb_list.append(u'{}'.format(i.get('language', '').upper()))\n if sdv.get('duration'):\n lb_list.append(u'{} mins'.format(utils.try_parse_int(sdv.get('duration', 0)) // 60))\n if f.get('size'):\n lb_list.append(u'{}'.format(utils.normalise_filesize(f.get('size', 0))))\n label_b = ' | '.join(lb_list) if lb_list else ''\n d_items.append(ListItem(label=label_a, label2=label_b, icon=f.get('thumbnail')).set_listitem())\n if d_items:\n idx = 0\n if d.get('dialog', '').lower() != 'auto' or len(d_items) != 1:\n idx = xbmcgui.Dialog().select('Select Item', d_items, useDetails=True)\n if idx == -1: # User exited the dialog so return and do nothing\n return\n resolve_url = True if folder[idx].get('filetype') == 'file' else False # Set true for files so we can play\n player = (resolve_url, folder[idx].get('file')) # Set the folder path to open/play\n break # Move onto next action\n else: # Ask user to select a different player if no items in dialog\n return self.play_external(force_dialog=force_dialog, playerindex=playerindex)\n\n x = 0\n for f in folder: # Iterate through plugin folder looking for a matching item\n x += 1 # Keep an index for position matching\n for k, v in d.items(): # Iterate through our key (infolabel) / value (infolabel must match) pairs of our action\n if k == 'position': # We're looking for an item position not an infolabel\n if utils.try_parse_int(string_format_map(v, self.item)) != x: # Format our position value\n break # Not the item position we want so let's go to next item in folder\n elif not f.get(k) or string_format_map(v, self.item) not in u'{}'.format(f.get(k, '')): # Format our value and check if it matches the infolabel key\n break # Item's key value doesn't match value we are looking for so let's got to next item in folder\n else: # Item matched our criteria so let's open it up\n resolve_url = True if f.get('filetype') == 'file' else False # Set true for files so we can play\n player = (resolve_url, f.get('file')) # Get ListItem.FolderPath for item\n break # Move onto next action (either open next folder or play file)\n else:\n return self.play_external(force_dialog=force_dialog, playerindex=playerindex) # Ask user to select a different player\n\n # Play/Search found item\n if player and player[1]:\n action = string_format_map(player[1], self.item)\n if player[0] and action.endswith('.strm'): # Action is play and is a strm so PlayMedia\n xbmc.executebuiltin(utils.try_decode_string(u'PlayMedia({0})'.format(action)))\n elif player[0]: # Action is play and not a strm so play with player\n xbmc.Player().play(action, ListItem(library='video', **self.details).set_listitem())\n else:\n action = u'Container.Update({0})'.format(action) if xbmc.getCondVisibility(\"Window.IsMedia\") else u'ActivateWindow(videos,{0},return)'.format(action)\n xbmc.executebuiltin(utils.try_decode_string(action))\n return action\n\n def 
play(self, itemtype, tmdb_id, season=None, episode=None, force_dialog=False):\n \"\"\" Entry point for player method \"\"\"\n if not tmdb_id or not itemtype:\n return\n\n # Get the details for the item\n self.itemtype, self.tmdb_id, self.season, self.episode = itemtype, tmdb_id, season, episode\n self.tmdbtype = 'tv' if self.itemtype in ['episode', 'tv'] else 'movie'\n self.details = self.tmdb.get_detailed_item(self.tmdbtype, tmdb_id, season=season, episode=episode)\n self.item['imdb_id'] = self.details.get('infolabels', {}).get('imdbnumber')\n self.item['originaltitle'] = self.details.get('infolabels', {}).get('originaltitle')\n self.item['title'] = self.details.get('infolabels', {}).get('tvshowtitle') or self.details.get('infolabels', {}).get('title')\n self.item['year'] = self.details.get('infolabels', {}).get('year')\n\n # Check if we have a local file\n # TODO: Add option to auto play local\n if self.details and self.itemtype == 'movie':\n self.is_local = self.localmovie()\n if self.details and self.itemtype == 'episode':\n self.is_local = self.localepisode()\n\n self.setup_players(details=True)\n\n if not self.itemlist:\n return False\n\n return self.play_external(force_dialog=force_dialog)\n\n def build_details(self):\n self.item['id'] = self.tmdb_id\n self.item['tmdb'] = self.tmdb_id\n self.item['imdb'] = self.details.get('infolabels', {}).get('imdbnumber')\n self.item['name'] = u'{0} ({1})'.format(self.item.get('title'), self.item.get('year'))\n self.item['firstaired'] = self.details.get('infolabels', {}).get('premiered')\n self.item['premiered'] = self.details.get('infolabels', {}).get('premiered')\n self.item['released'] = self.details.get('infolabels', {}).get('premiered')\n self.item['showname'] = self.item.get('title')\n self.item['clearname'] = self.item.get('title')\n self.item['tvshowtitle'] = self.item.get('title')\n self.item['title'] = self.item.get('title')\n self.item['thumbnail'] = self.details.get('thumb')\n self.item['poster'] = self.details.get('poster')\n self.item['fanart'] = self.details.get('fanart')\n self.item['now'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')\n\n if self.traktapi:\n slug_type = utils.type_convert(self.tmdbtype, 'trakt')\n trakt_details = self.traktapi.get_details(slug_type, self.traktapi.get_traktslug(slug_type, 'tmdb', self.tmdb_id))\n self.item['trakt'] = trakt_details.get('ids', {}).get('trakt')\n self.item['imdb'] = trakt_details.get('ids', {}).get('imdb')\n self.item['tvdb'] = trakt_details.get('ids', {}).get('tvdb')\n self.item['slug'] = trakt_details.get('ids', {}).get('slug')\n\n if self.itemtype == 'episode': # Do some special episode stuff\n self.item['id'] = self.item.get('tvdb')\n self.item['title'] = self.details.get('infolabels', {}).get('title') # Set Episode Title\n self.item['name'] = u'{0} S{1:02d}E{2:02d}'.format(self.item.get('showname'), int(utils.try_parse_int(self.season)), int(utils.try_parse_int(self.episode)))\n self.item['season'] = self.season\n self.item['episode'] = self.episode\n\n if self.traktapi and self.itemtype == 'episode':\n trakt_details = self.traktapi.get_details(slug_type, self.item.get('slug'), season=self.season, episode=self.episode)\n self.item['epid'] = trakt_details.get('ids', {}).get('tvdb')\n self.item['epimdb'] = trakt_details.get('ids', {}).get('imdb')\n self.item['eptmdb'] = trakt_details.get('ids', {}).get('tmdb')\n self.item['eptrakt'] = trakt_details.get('ids', {}).get('trakt')\n\n for k, v in self.item.copy().items():\n v = u'{0}'.format(v)\n self.item[k] = v.replace(',', 
'')\n self.item[k + '_+'] = v.replace(' ', '+')\n self.item[k + '_-'] = v.replace(' ', '-')\n self.item[k + '_escaped'] = v.replace(' ', '%2520')\n self.item[k + '_escaped+'] = v.replace(' ', '%252B')\n self.item[k + '_url'] = quote_plus(utils.try_encode_string(v))\n\n def build_players(self, tmdbtype=None):\n basedirs = ['special://profile/addon_data/plugin.video.themoviedb.helper/players/']\n if self.addon.getSettingBool('bundled_players'):\n basedirs.append('special://home/addons/plugin.video.themoviedb.helper/resources/players/')\n for basedir in basedirs:\n files = [x for x in xbmcvfs.listdir(basedir)[1] if x.endswith('.json')]\n for file in files:\n vfs_file = xbmcvfs.File(basedir + file)\n try:\n content = vfs_file.read()\n meta = loads(content) or {}\n finally:\n vfs_file.close()\n\n self.players[file] = meta\n if not meta.get('plugin') or not xbmc.getCondVisibility(u'System.HasAddon({0})'.format(meta.get('plugin'))):\n continue # Don't have plugin so skip\n\n tmdbtype = tmdbtype or self.tmdbtype\n priority = utils.try_parse_int(meta.get('priority')) or 1000\n if tmdbtype == 'movie' and meta.get('search_movie'):\n self.search_movie.append((file, priority))\n if tmdbtype == 'movie' and meta.get('play_movie'):\n self.play_movie.append((file, priority))\n if tmdbtype == 'tv' and meta.get('search_episode'):\n self.search_episode.append((file, priority))\n if tmdbtype == 'tv' and meta.get('play_episode'):\n self.play_episode.append((file, priority))\n\n def build_playeraction(self, playerfile, action, assertplayers=True):\n player = self.players.get(playerfile, {})\n isplay = True if action.startswith('play_') else False\n prefix = self.addon.getLocalizedString(32061) if action.startswith('play_') else xbmc.getLocalizedString(137)\n label = u'{0} {1}'.format(prefix, player.get('name', ''))\n\n # Check if matches default player and set default player id\n if label == self.dp_movies:\n self.dp_movies_id = '{} {}'.format(playerfile, action)\n if label == self.dp_episodes:\n self.dp_episodes_id = '{} {}'.format(playerfile, action)\n\n # Check that asserted values exist\n if assertplayers:\n for i in player.get('assert', {}).get(action, []):\n if i.startswith('!'):\n if self.item.get(i[1:]) and self.item.get(i[1:]) != 'None':\n return # inverted assert - has value but we don't want it so don't build that player\n else:\n if not self.item.get(i) or self.item.get(i) == 'None':\n return # missing / empty asserted value so don't build that player\n\n # Add player action to list for dialog\n self.append_playeraction(\n label=label, action=player.get(action, ''), isplay=isplay,\n identifier='{} {}'.format(playerfile, action))\n\n def append_playeraction(self, label, action, isplay=True, identifier=''):\n self.itemlist.append(xbmcgui.ListItem(label))\n self.actions.append((isplay, action))\n self.identifierlist.append(identifier)\n\n def build_selectbox(self, clearsetting=False, assertplayers=True):\n self.itemlist, self.actions = [], []\n if clearsetting:\n self.itemlist.append(xbmcgui.ListItem(xbmc.getLocalizedString(13403))) # Clear Default\n if self.is_local:\n self.append_playeraction(u'{0} {1}'.format(self.addon.getLocalizedString(32061), 'Kodi'), self.is_local, identifier='play_kodi')\n for i in sorted(self.play_movie, key=lambda x: x[1]):\n self.build_playeraction(i[0], 'play_movie', assertplayers=assertplayers)\n for i in sorted(self.search_movie, key=lambda x: x[1]):\n self.build_playeraction(i[0], 'search_movie', assertplayers=assertplayers)\n for i in sorted(self.play_episode, 
key=lambda x: x[1]):\n self.build_playeraction(i[0], 'play_episode', assertplayers=assertplayers)\n for i in sorted(self.search_episode, key=lambda x: x[1]):\n self.build_playeraction(i[0], 'search_episode', assertplayers=assertplayers)\n\n def localfile(self, file):\n if not file:\n return\n if file.endswith('.strm'):\n f = xbmcvfs.File(file)\n contents = f.read()\n f.close()\n if contents.startswith('plugin://plugin.video.themoviedb.helper'):\n return\n return file\n\n def localmovie(self):\n fuzzy_match = self.addon.getSettingBool('fuzzymatch_movie')\n return self.localfile(KodiLibrary(dbtype='movie').get_info('file', fuzzy_match=fuzzy_match, **self.item))\n\n def localepisode(self):\n fuzzy_match = self.addon.getSettingBool('fuzzymatch_tv')\n fuzzy_match = True # TODO: Get tvshow year to match against but for now force fuzzy match\n dbid = KodiLibrary(dbtype='tvshow').get_info('dbid', fuzzy_match=fuzzy_match, **self.item)\n return self.localfile(KodiLibrary(dbtype='episode', tvshowid=dbid).get_info('file', season=self.season, episode=self.episode))\n","repo_name":"cbec-dev/plugin.video.themoviedb.helper","sub_path":"resources/lib/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":20050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26097327560","text":"from typing import Optional, TYPE_CHECKING\nimport zulip\nfrom running.plugin.runbms import RunbmsPlugin\nfrom running.util import Moma, register, MomaReservationStatus, config_index_to_chr\nimport logging\nimport copy\nfrom running.suite import is_dry_run\nfrom running.command.runbms import hfac_str\nfrom datetime import datetime, timedelta\nif TYPE_CHECKING:\n from running.benchmark import Benchmark\n\nRESERVATION_WARNING_HOURS = 12\nRESERVATION_WARNING_THRESHOLD = timedelta(\n seconds=RESERVATION_WARNING_HOURS * 60 * 60)\n\n\n@register(RunbmsPlugin)\nclass Zulip(RunbmsPlugin):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.config_file = kwargs.get(\"config_file\", \"~/.zuliprc\")\n self.client = zulip.Client(config_file=self.config_file)\n self.request = kwargs.get(\"request\", {})\n if type(self.request) is not dict:\n raise TypeError(\"request of Zulip must be a dictionary\")\n if self.request.get(\"type\") not in [\"private\", \"stream\"]:\n raise ValueError(\"Request type must be either private or stream\")\n if self.request.get(\"type\") == \"stream\" and \"topic\" not in self.request:\n raise KeyError(\"Stream messages must have a topic\")\n if \"to\" not in self.request:\n raise KeyError(\"Request must have a to field\")\n self.nop = is_dry_run()\n self.moma = Moma()\n self.last_message_id = None\n self.last_message_content = None\n\n def send_message(self, content):\n message_data = copy.deepcopy(self.request)\n message_data[\"content\"] = \"{}\\n{}{}\\n\".format(\n self.run_id,\n self.get_reservation_message(),\n content\n )\n try:\n result = self.client.send_message(message_data=message_data)\n if result[\"result\"] != \"success\":\n logging.warning(\"Zulip send_message failed\\n{}\".format(result))\n else:\n self.last_message_id = result[\"id\"]\n self.last_message_content = message_data[\"content\"]\n except:\n logging.exception(\"Unhandled Zulip send_message exception\")\n\n def modify_message(self, content):\n request = {\n \"message_id\": self.last_message_id,\n \"content\": content,\n }\n try:\n result = self.client.update_message(request)\n if result[\"result\"] != \"success\":\n logging.warning(\n \"Zulip 
update_message failed\\n{}\".format(result))\n else:\n self.last_message_content = content\n except:\n logging.exception(\"Unhandled Zulip update_message exception\")\n\n def __str__(self) -> str:\n return \"Zulip {}\".format(self.name)\n\n def start_hfac(self, hfac: Optional[float]):\n if self.nop:\n return\n self.send_message(\"hfac {} started\".format(\n hfac_str(hfac) if hfac is not None else \"None\"))\n\n def end_hfac(self, hfac: Optional[float]):\n if self.nop:\n return\n self.send_message(\"hfac {} ended\".format(\n hfac_str(hfac) if hfac is not None else \"None\"))\n\n def start_benchmark(self, _hfac: Optional[float], _size: Optional[int], bm: \"Benchmark\"):\n if self.nop:\n return\n self.send_message(\"benchmark {} started\".format(bm.name))\n\n def end_benchmark(self, _hfac: Optional[float], _size: Optional[int], bm: \"Benchmark\"):\n if self.nop:\n return\n self.send_message(\"benchmark {} ended\".format(bm.name))\n\n def start_invocation(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", invocation: int):\n if self.nop:\n return\n if self.last_message_id and self.last_message_content:\n self.modify_message(self.last_message_content + str(invocation))\n\n def end_invocation(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", _invocation: int):\n if self.nop:\n return\n\n def start_config(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", _invocation: int, _config: str, _config_index: int):\n if self.nop:\n return\n\n def end_config(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", _invocation: int, _config: str, config_index: int, passed: bool):\n if self.nop:\n return\n if self.last_message_id and self.last_message_content:\n if passed:\n self.modify_message(self.last_message_content +\n config_index_to_chr(config_index))\n else:\n self.modify_message(self.last_message_content + \".\")\n\n def get_reservation_message(self) -> str:\n reservation = self.moma.get_reservation()\n if reservation is None:\n return \"\"\n if reservation.status is MomaReservationStatus.NOT_MOMA:\n return \"# ** Warning: not running on a moma machine. **\\n\"\n elif reservation.status is MomaReservationStatus.NOT_RESERVED:\n return \"# ** Warning: machine not reserved. **\\n\"\n elif reservation.status is MomaReservationStatus.RESERVED_BY_OTHERS:\n return \"# ** Warning: machine reserved by {}, ends at {}. **\\n\".format(\n reservation.user,\n reservation.end\n )\n elif reservation.status is MomaReservationStatus.RESERVED_BY_ME:\n assert reservation.end is not None\n delta = reservation.end - datetime.now()\n if delta < RESERVATION_WARNING_THRESHOLD:\n return \"# ** Warning: less than {} hours of reservation left. Current reservation ends at {}. 
**\\n\".format(\n RESERVATION_WARNING_HOURS,\n reservation.end\n )\n else:\n return \"\"\n else:\n raise ValueError(\"Unhandled reservation status value\")\n","repo_name":"anupli/running-ng","sub_path":"src/running/plugin/runbms/zulip.py","file_name":"zulip.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"} +{"seq_id":"5940867039","text":"import argparse\nimport os\nimport subprocess\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='reads')\nparser.add_argument('--ind', help=\"ind to run this on\")\nargs = parser.parse_args()\nind = args.ind\n\n\nseq = '/scratch/lsa_flux/baizm/reference_genome/AloPal_combined.a.lines.fasta'\nread_dir = '/scratch/lsa_flux/baizm/Lane_1/cortes/flash_out/'\nout_dir = '/scratch/lsa_flux/baizm/alignments/'\n\n\ndef align_seq_pe(ind, out_dir, read_dir, seq):\n\tr1 = '%s%s.notCombined_1.fastq.gz' % (read_dir, ind)\n\tr2 = '%s%s.notCombined_2.fastq.gz' % (read_dir, ind)\n\t\n\tout1 = '%s%s_pe.sam' % (out_dir, ind)\n\tout2 = '%s%s_pe.mateFixed.bam' % (out_dir, ind)\n\tout3 = '%s%s_pe.mateFixed.sorted' % (out_dir, ind)\n\n\t# align\n\tsubprocess.call(\"bwa mem -t 20 %s %s %s > %s\" % (seq, r1, r2, out1), shell=True)\n\t# fixmate\n\tsubprocess.call(\"samtools fixmate %s %s\" % (out1, out2), shell=True)\n\t# sorted\n\tsubprocess.call(\"samtools sort %s %s\" % (out2, out3), shell=True)\n\t\n\t\ndef align_seq_ext(ind, out_dir, read_dir, seq):\n\tex = '%s%s.extendedFrags.fastq.gz' % (read_dir, ind)\n\n\tout1 = '%s%s_ext.sam' % (out_dir, ind)\n\tout2 = '%s%s_ext.bam' % (out_dir, ind)\n\tout3 = '%s%s_ext.sorted' % (out_dir, ind)\n\t\n\t# align\n\tsubprocess.call(\"bwa mem -t 20 %s %s > %s\" % (seq, ex, out1), shell=True)\n\t# bam\n\tsubprocess.call(\"samtools view -uS %s > %s\" % (out1, out2), shell=True)\n\t# sorted\n\tsubprocess.call(\"samtools sort %s %s\" % (out2, out3), shell=True)\n\n\ndef merge_and_rg(ind, out_dir, read_dir, seq):\n\n\tout1 = '%s%s_pe.mateFixed.sorted.bam' % (out_dir, ind)\n\tout2 = '%s%s_ext.sorted.bam' % (out_dir, ind)\n\tout3 = '%s%s_all.bam' % (out_dir, ind)\n\tout4 = '%s%s_all.rg.bam' % (out_dir, ind)\n\tintervals = '%s%s_all.intervals' % (out_dir, ind)\n\tout5 = '%s%s_all.realigned.bam' % (out_dir, ind)\n\tout6 = '%s%s_all.bwamem.unique.bam' % (out_dir, ind)\n\n\n\t# merge bam files for alignments of paired extended reads \n\tsubprocess.call(\"samtools merge %s %s %s\" % (out3, out1, out2), shell=True) \n\t# readgroup\n\tsubprocess.call(\"java -jar ./picard.jar AddOrReplaceReadGroups INPUT=%s OUTPUT=%s RGLB=%s RGPL=Illumina RGPU=%s RGSM=%s\" % (out3, out4, ind, ind, ind), shell=True)\n\tsubprocess.call(\"samtools index %s\" % out4, shell=True)\n\t\n\n\n# align all the way until time to call SNPs\nalign_seq_pe(ind, out_dir, read_dir, seq)\nalign_seq_ext(ind, out_dir, read_dir, seq)\nmerge_and_rg(ind, out_dir, read_dir, seq)\n","repo_name":"baizm/snp-calling","sub_path":"align_reads.py","file_name":"align_reads.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39235338718","text":"from django.shortcuts import render\nfrom sigirexperiments import performancemeasure\nfrom ERtasks.models import Cora_labeled,Cora\nfrom sigirexperiments import models\nfrom register.models import WorkLog, WorkerInfo\nfrom pyweb import dxaconstants\nimport json\nimport dedupe\nimport os\nfrom django.http import HttpResponse\nfrom baselinealloy.castgather 
import searchmh,findtail\nfrom baselinealloy import headcast\nfrom baselinealloy import models as alloymodels\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom sigirexperiments import recordsampling,featuresampling,clustering,processhelper,dextrapreclustering,patternRecommendation,hbf,trainingGenerator\nfrom django.db.models import Q\n\ntraining_file = 'cora_records_randsampling80_training.json'\nsettings_file = 'cora_records_randsampling80_learned_settings'\n\nworkeroperatiionNum = 10\n#samplemth = dxaconstants.RecordSamplingMethod.UncertainSampling\n#samplemth = dxaconstants.RecordSamplingMethod.SearchSampling\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIG\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIGPattern\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingBasic\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRADIDSamplingBasic\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRADIDSamplingIGPattern\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRADIDIGPatternHBFsimple4\n# samplemth = dxaconstants.RecordSamplingMethod.hbfaffinegapOnlyclusterSimple\n# samplemth = dxaconstants.RecordSamplingMethod.hbfClusterViewSimpleMinhash\n# samplemth = dxaconstants.RecordSamplingMethod.hbfClusterViewSimpleMinhashaffinegap\nsamplemth = dxaconstants.RecordSamplingMethod.hbfClusterViewComplexMinhashaffinegap\n# samplemth = 'attributeClassifiers'\ntaskc = dxaconstants.ERTASK.Cora\nattrExplorationDisplayNum = 3\npattern_siblingDisplayNum = 3\ncontent_DisplayNum = 10\nattrBufferPoolSize = 15\nrecordBufferPoolSize = 200\nentity_view_threshold = 2\n\nexpriment_result_path = 'expriment_result.json'\notherInfo = 'hbf'\nclustering_result_path = 'expriment_clsuterresult.json'\n\nDID_flag = False\n# if samplemth == dxaconstants.RecordSamplingMethod.DEXTRADIDSamplingBasic:\n# DID_flag = True\ndataset = Cora_labeled.objects.order_by('?')\nif taskc == dxaconstants.ERTASK.Cora:\n if samplemth == dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIG or samplemth == dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIGPattern or samplemth == dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingBasic:\n dataset = Cora_labeled.objects.order_by('?')\n elif DID_flag:\n coradataset = Cora_labeled.objects.order_by('?') #init\n clustdict = dextrapreclustering.minhashPreClustering(coradataset)\n BPid = processhelper.fulfillRecordBufferPool(clustdict=clustdict,BP_size=recordBufferPoolSize)\n # BP = [coradataset.get(id=item) for item in BPid]\n # dataset = recordsampling.DIDsamplingInit(dataset=coradataset,BF=BP,beta=1,clustdict=clustdict)\n # dataset = dataset.order_by('orderscore').reverse()\n # dataset = recordsampling.DIDsamplingLittleInit(dataset=coradataset,beta=1,clustdict=clustdict)\n dataset = coradataset\n\n\n\n\n\ndef exploreperformance(request):\n\n # nmi = record_uncertainsampling_temp(request)\n # return render(request,'sigir/exploreperformance.html',{'RecordSamplingMethod':dxaconstants.RecordSamplingMethod.UncertainSampling,'workerOperationNum':workeroperatiionNum,'data':nmi})\n # dict = record_uncertainsampling_multimeasure(request,workeroperatiionNum)\n # # aa(request)\n\n # #record_common_cluster_vs_opnum(username=username)\n # record_common_cluster_measure(username=username)\n # headcast.test('1,p,a',2,9)\n # headcast.searchtfidf(seedid=885,kw='sympos,1993,tolerant',num=9)\n username = request.session['username']\n #recordsampling.DIDsampling()\n # featuresampling.IG()\n data = 
WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n # outputpath = 'E:\\experiment_temp\\dextra_randomsampling_IG_Pattern\\_trainingdata\\DEXTRARandomSamplingIGPattern_op10_data.json'\n # clustering.dextra_attrbute_hcluster(samplingMethod=samplemth,workeropNum=10,outputpath=outputpath,userid=data[0],username=username)\n # performancemeasure.record_common_cluster_measure(samplemth=samplemth, list=[10])\n # currentattr = models.sigirSynonymsSeedTemp.objects.filter(user=username)[0].cattr\n # dict = patternRecommendation.synonymsForCurrentAttr(data=dataset,currentAttr=currentattr)\n # for k, v in dict.items():\n # print(k, v)\n # print(data[0])\n\n\n\n ahbf = hbf.constructHBFfordataset(data[0])\n print(json.dumps(ahbf))\n hbf.printHBF(ahbf)\n store_hbf_path = '_'.join([taskc.name,samplemth.name,str(workeroperatiionNum),otherInfo,'op.json'])\n fw = open(store_hbf_path, 'w', encoding='utf-8')\n json.dump(ahbf, fw, ensure_ascii=False, indent=4)\n orderhbf = hbf.constructOrderedHBF(ahbf=ahbf,dataset=dataset)\n orderhbf = hbf.computeEdges(orderhbf)\n ordered_layers_dict = orderhbf.get_orderlayers_dict()\n print(json.dumps(ordered_layers_dict))\n # hbf.printOrderedHBF(orderhbf)\n # hbf.printOrderedHBFByOrder(orderhbf)\n hbf.printOrderedHBFByOrderOnly(orderhbf)\n sum_dict = hbf.estimateN_ACR(orderhbf)\n attris = models.sigirCoraAttr.objects.filter(userid=data[0])\n values = models.sigirCoraAttrValue.objects.filter(attr_id__in=[ attr.id for attr in attris])\n syns = models.sigirCoraValueSynonym.objects.filter(value_id__in=[ value.id for value in values])\n experimentmsg = {'workerName':username,'worderid':data[0],'task':taskc.name,'method':samplemth.name,'opNum':workeroperatiionNum,'hbfstoredPath':store_hbf_path,'acr_measure':sum_dict,'U_size':attris.count(),'V_size':values.count(),'D_size':syns.count()}\n f = open(expriment_result_path,'a')\n json.dump(experimentmsg, f, ensure_ascii=False, indent=4)\n f.write('\\n')\n f.close()\n print(json.dumps(experimentmsg))\n cluster_dict = hbf.hbfClusterViewComplexMinhashaffinegap(corahbf=orderhbf,dataset=dataset,acr_threshold=0.3,username=username,dis_threshold=3)\n for k,v in cluster_dict.items():\n models.CoraPerformanceLog.objects.create(\n explorationMethod=samplemth, clusterid=v,\n cora_id=k, confidence=-1, workerOperationNum=workeroperatiionNum)\n clusterresultdict = performancemeasure.record_sampling_clustermultimeasure_vs_opnum(wn=workeroperatiionNum,samplemth=samplemth)\n ff = open(clustering_result_path, 'a')\n json.dump(clusterresultdict, ff, ensure_ascii=False, indent=4)\n ff.write('\\n')\n ff.close()\n\n\n # traingdata_path = '_'.join(['training',taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.train'])\n # testingdata_path = '_'.join(['testing', taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.test'])\n # validationdata_path = '_'.join(['validation', taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.dev'])\n # traingdata, testingdata, validationdata = trainingGenerator.basicTrainingDataDenseGenerator(username=username)\n # processhelper.writeTraingTest(traingdata_path,traingdata)\n # processhelper.writeTraingTest(testingdata_path,testingdata)\n # processhelper.writeTraingTest(validationdata_path, validationdata)\n\n # recommendation_from_classifiers = processhelper.preocessPredictedResult(r'C:\\Users\\sayarara\\Desktop\\experiment_classifiers\\basic_dense\\predicted.txt')\n # filtered_dict = 
processhelper.filterPredictedResult(recommendation_from_classifiers,userid=data[0])\n # for k,v in filtered_dict.items():\n # print(k,len(v))\n # print(v)\n # f = open('filtered_recommendation_from_basic_classifiers','a')\n # json.dump(filtered_dict, f, ensure_ascii=False, indent=4)\n # attras = models.sigirCoraAttr.objects.filter(userid=data[0])\n # for attra in attras:\n # traingdata, testingdata, validationdata = trainingGenerator.AttributeTrainingDataGenerator(username=username,attr_id=attra.id)\n # traingdata_path = '_'.join([attra.attrname,taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.train'])\n # testingdata_path = '_'.join([attra.attrname, taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.test'])\n # validationdata_path = '_'.join([attra.attrname, taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.dev'])\n # processhelper.writeTraingTest(traingdata_path,traingdata)\n # processhelper.writeTraingTest(testingdata_path,testingdata)\n # processhelper.writeTraingTest(validationdata_path, validationdata)\n # allennlp_test_path = '_'.join([attra.attrname, taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, 'json.test'])\n # processhelper.writeAllenNLPTestFormat(allennlptestpath=allennlp_test_path,originalpath=testingdata_path)\n\n\n\n # predicted_names_list = ['predicted_author.txt', 'predicted_confshortname.txt', 'predicted_period.txt',\n # 'predicted_press.txt', 'predicted_year.txt', 'predicted_pages.txt', 'predicted_place.txt']\n # dir = r'C:\\Users\\sayarara\\Desktop\\experiment_classifiers\\attributes_clean\\predict'\n # d = []\n # synos = models.sigirCoraValueSynonym.objects.filter(userid=data[0])\n # syns_list = [syn.synonym for syn in synos]\n # for name in predicted_names_list:\n # path = dir + '\\\\' + name\n # print(path)\n # dict,t = processhelper.aaa(path=path)\n # d.append(dict['NULL'])\n # t = set(t).difference(set(syns_list))\n # print(t)\n # print(len(t))\n # nn = list(set(dict['NULL']).difference(set(syns_list)))\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_related_'+name, t)\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_not_related_' + name, nn)\n # print(d)\n #\n # inter = d[0]\n # for i in range(1, len(d)):\n # inter = set(inter).intersection(set(d[i]))\n # print(inter)\n # inter = list(set(inter).difference(set(syns_list)))\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_not_related_inter', inter)\n #\n # union = d[0]\n # for i in range(1, len(d)):\n # union = set(union).union(set(d[i]))\n # print(union)\n # union = list(set(union).difference(set(syns_list)))\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_not_related_union', union)\n\n # aa = patternRecommendation.synsUsingWordNet('eighth')\n # print(aa)\n\n\n\n\n\n # processhelper.test(dataset=dataset,username=username,userid=15)\n # testa(username=username)\n # processhelper.simpleAutoMatchDection()\n #performancemeasure.record_sampling_clustermultimeasure_vs_opnum(wn=10,samplemth=samplemth)\n\n # processhelper.testre(str='156')\n # testb()\n # cora = Cora_labeled.objects.all()\n # # featuresampling.minHashFastClusterIG(clusternum=100,cora=cora)\n # data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n # processhelper.sos(attrsynonym='oxford',taskc=taskc,samplemth=samplemth,user=data[0])\n\n\n\n\n\n dict = {}\n return render(request, 'sigir/exploreperformance.html',\n dict)\n\ndeduper = 0\ntemp_d = {}\n\n\n\ndef baselinededupeinit():\n cora = Cora_labeled.objects.all()\n # c_r 
= {}\n # temp_d = dict((item.id, item.text) for item in cora)\n # count = 1\n for item in cora:\n # c_r['id']=item.id\n # c_r['text'] = item.text\n # clean_row = dict([('text',item.text),('id',item.id)])\n clean_row = dict([('text', item.text), ('id', str(item.id))])\n temp_d[item.id] = dict(clean_row)\n fields = [{'field': 'text', 'type': 'Text'}]\n # Create a new deduper object and pass our data model to it.\n deduper = dedupe.Dedupe(fields)\n deduper.sample(temp_d)\n\ndef testb():\n cora = Cora_labeled.objects.all()\n for item in cora:\n item.cleantext = processhelper.simpledatacleaning(item.text)\n item.save()\n print(\"done\")\n\n\ndef testa(username):\n cora = Cora_labeled.objects.all()\n # c_r = {}\n # temp_d = dict((item.id, item.text) for item in cora)\n # count = 1\n for item in cora:\n # c_r['id']=item.id\n # c_r['text'] = item.text\n # clean_row = dict([('text',item.text),('id',item.id)])\n clean_row = dict([('text', item.text), ('id', str(item.id))])\n temp_d[item.id] = dict(clean_row)\n fields = [{'field': 'text', 'type': 'Text'}]\n # Create a new deduper object and pass our data model to it.\n deduper = dedupe.Dedupe(fields)\n deduper.sample(temp_d)\n clustering.testnosamplingclustering(deduper=deduper,username=username,samplingMethod=dxaconstants.RecordSamplingMethod.UncertainSampling,workeropNum=20,temp_d=temp_d)\n# Create your views here.\n\ndef entityview(request):\n username = request.session['username']\n # f = open('_'.join([username, str(entity_view_threshold),'entityview.json']), encoding='utf-8')\n # entitydict = json.load(f)\n f = open('_'.join([username, str(entity_view_threshold),'entityview.json']), \"r\") # open the file object\n strr = f.read() # read the entire contents of the txt file into the string\n f.close() # close the file\n entitydict = eval(strr)\n dict = {}\n for k,v in entitydict.items():\n print(k)\n if v:\n print(v)\n else:\n print('NULL')\n\n # kstr = []\n # for i in range(entity_view_threshold):\n # aa = processhelper.viewhelperSummary(attrname=k[0][i],value=k[1][i])\n # kstr.append(aa)\n coras = Cora.objects.filter(id__in=v)\n # dict[' '.join(kstr)] = coras\n dict[k] = coras\n \n return render(request,'sigir/entityview.html',{'entities':dict,'entity_view_threshold':entity_view_threshold})\n\n\ndef exploration(request):\n username = request.session['username']\n print(username)\n data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n # if DID_flag:\n # # dataset = recordsampling.DIDsamplingInit(dataset=coradataset, BF=coradataset, beta=1,\n # # clustdict=clustdict)\n # # dataset = recordsampling.DIDsampling(dataset=coradataset, BF=coradataset, username=username, userid=data[0],\n # # attra_id=attrid, beta=1, clustdict=clustdict)\n # # dataset = recordsampling.DIDsamplingNoAttr(dataset=coradataset,BF=BP,username=username,userid=data[0],clustdict=clustdict)\n # dataset = recordsampling.DIDsamplingNoAttrLittle(dataset=coradataset,BF=BPid,username=username,userid=data[0],clustdict=clustdict,beta=1)\n # dataset = dataset.order_by('orderscore').reverse()\n # if request.method == 'POST':\n # attribute_editor = request.POST.get(\"attribute_editor\")\n # attrbute_create = request.POST.get(\"attribute_create\")\n # print(attrbute_create)\n # multis = request.POST.getlist(\"IG\")\n # print(multis)\n # submittype = request.POST.get(\"submit\")\n # print(submittype)\n # if submittype == \"create and bind\":\n # # pseedid = request.POST.get(\"pseedid\")\n # print(\"create and bind\")\n # list_sort_value_desc = featuresampling.IG()\n\n if request.is_ajax():\n print(request.body)\n 
print(request.POST)\n searchkey = request.POST.get('searchkey')\n page = request.POST.get('page')\n print(searchkey)\n # cora_list = Cora_labeled.objects.filter(text__contains=searchkey)\n cora_list = dataset.filter(text__contains=searchkey)\n print(cora_list)\n paginator = Paginator(cora_list, content_DisplayNum)\n try:\n cora = paginator.page(page)\n except PageNotAnInteger:\n # first page\n cora = paginator.page(1)\n except EmptyPage:\n # last page\n cora = paginator.page(paginator.num_pages)\n response = HttpResponse();\n response['Content-Type'] = \"text/javascript\"\n response.write(json.dumps({'rows': cora, 'total': len(cora)}))\n return response\n else:\n searchkey = request.GET.get('q',\"\")\n print(searchkey)\n if searchkey:\n # cora_list = Cora_labeled.objects.filter(text__icontains=searchkey)\n # request.session['q'] = searchkey\n cora_list = dataset.filter(text__contains=searchkey)\n else:\n # searchkey = request.session['q']\n # if searchkey:\n # cora_list = Cora.objects.filter(text__contains=searchkey)\n # else:\n # cora_list = Cora.objects.all\n # cora_list = Cora_labeled.objects.all()\n cora_list = dataset\n print(searchkey)\n paginator = Paginator(cora_list,content_DisplayNum) # Show 10 per page\n page = request.GET.get('page')\n print(page)\n attrpage = request.GET.get('attrpage')\n print(attrpage)\n # attrpaginator = Paginator(list_sort_value_desc, 10)\n if attrpage:\n attrpage = int(attrpage)\n else:\n attrpage = 1\n\n try:\n cora = paginator.page(page)\n except PageNotAnInteger:\n # first page\n cora = paginator.page(1)\n except EmptyPage:\n # last page\n cora = paginator.page(paginator.num_pages)\n # try:\n # attrig = attrpaginator.page(attrpage)\n # except PageNotAnInteger:\n # # first page\n # attrig = attrpaginator.page(1)\n # except EmptyPage:\n # # last page\n # attrig = attrpaginator.page(attrpaginator.num_pages)\n # coraa = Cora_labeled.objects.all()\n # list_sort_value_desc = featuresampling.minHashFastClusterIG(clusternum=100, cora=coraa)\n # print(list_sort_value_desc[0:10])\n # return render(request, 'sigir/exploration.html',{'data': cora,'searchkey':searchkey,'attrIG':list_sort_value_desc[0+attrExplorationDisplayNum*(attrpage-1):attrExplorationDisplayNum*attrpage]})\n\n if not models.sigirAttrExploration.objects.filter(user=username):\n # init the substrings with information gain\n featuresampling.minHashFastClusterIG(clusternum=100,cora=dataset,username=username)\n attrexplo = models.sigirAttrExploration.objects.filter(user=username,is_labelled=False).order_by('orderscore').reverse()\n a = [ item.substring for item in attrexplo]\n print(a)\n matches = []\n seeds = models.patternSeedTemp.objects.filter(user=username)\n attrname = ''\n if seeds:\n\n seedslist = processhelper.str2list(seedstr=seeds[0].seedsubstring, deli='###')\n print(seedslist)\n syn = models.sigirCoraValueSynonym.objects.filter(synonym=seedslist[0],userid=data[0])[0]\n attrname = syn.value.attr.attrname\n attrid = syn.value.attr_id\n # if DID_flag:\n # # dataset = recordsampling.DIDsamplingInit(dataset=coradataset, BF=coradataset, beta=1,\n # # clustdict=clustdict)\n # # dataset = recordsampling.DIDsampling(dataset=coradataset,BF=BP,username=username,userid=data[0],attra_id=attrid,beta=1,clustdict=clustdict)\n # dataset = recordsampling.DIDsamplingLittle(dataset=coradataset,BF=BPid,username=username,userid=data[0],attra_id=attrid,beta=1,clustdict=clustdict)\n # dataset = dataset.order_by('orderscore').reverse()\n values = models.sigirCoraAttrValue.objects.filter(attr_id=attrid, 
userid=data[0])\n osysn = models.sigirCoraValueSynonym.objects.filter(value_id__in=[item.id for item in values])\n match = patternRecommendation.findCandidateSilbings(seedslist=seedslist, data=dataset)\n matches = list(set(match).difference(set([item.synonym for item in osysn])))[0:pattern_siblingDisplayNum]\n\n stadvalname = ''\n valsyns = []\n curentattrias = models.sigirSynonymsSeedTemp.objects.filter(user=username)\n if curentattrias:\n currentattr = curentattrias[0].cattr\n dict = patternRecommendation.synonymsForCurrentAttr(data=dataset, currentAttr=currentattr)\n if len(dict) == 0:\n stadvalname = ''\n valsyns = []\n else:\n a = sorted(dict.items())[0]\n stadvalname =a[0]\n valsyns = a[1]\n\n # dataset process progress\n ahbf = hbf.construct_value_level_HBFfordataset(data[0])\n entitydict = hbf.entityView(ahbf=ahbf,entity_view_threshold=entity_view_threshold)\n entityprogress = processhelper.getDatasetProgress(entitydict=entitydict)\n # fw = open('_'.join([username, str(entity_view_threshold),'entityview.json']), 'w', encoding='utf-8')\n # json.dump(entitydict, fw, ensure_ascii=False, indent=4)\n f = open('_'.join([username, str(entity_view_threshold),'entityview.json']), 'w')\n f.write(str(entitydict))\n f.close()\n # attribute process progress\n attributeprogress = processhelper.getAttributeProgress(userid=data[0],user=username)\n print(attributeprogress)\n return render(request, 'sigir/exploration.html', {'epg':entityprogress,'ap':attributeprogress,'ap2':attributeprogress,'data': cora, 'searchkey': searchkey,'matches':matches,'attrname':attrname,'syns':valsyns,'standvalue':stadvalname,\n 'attrIG2': attrexplo[\n 0 + attrExplorationDisplayNum * (\n attrpage - 1):attrExplorationDisplayNum * attrpage]})\n\n # return render(request, 'sigir/exploration.html',\n # {'data': cora, 'searchkey': searchkey, 'attrIG': attrig})\n\n\ndef attrexploration(request):\n username = request.session['username']\n data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n print(data[0])\n if request.method == 'POST':\n attrbute_create = request.POST.get('attrname')\n print(attrbute_create)\n multis = request.POST.getlist(\"IG\")\n print(multis)\n msg = {\"attrname\":attrbute_create,'selectedvalues':multis}\n models.dextraitems.objects.create(task=taskc,msg=json.dumps(msg),optype=dxaconstants.WorkerOperation.createAndBind,user=data[0],samplingMethod=samplemth)\n coraattr = models.sigirCoraAttr.objects.filter(attrname=attrbute_create,userid=data[0])\n\n\n\n\n\n if not coraattr:\n coraattr = models.sigirCoraAttr(attrname=attrbute_create, attrscope='local', is_alive=1, userid=data[0])\n coraattr.save()\n else:\n coraattr = coraattr[0]\n print(coraattr.id,coraattr.attrname)\n values = models.sigirAttrExploration.objects.filter(id__in = multis)\n seeds = [ item.substring for item in values]\n ss = models.patternSeedTemp.objects.filter(user=username)\n seedsstr = processhelper.list2str(seedslist=seeds,deli='###')\n if ss:\n ss[0].seedsubstring = seedsstr\n ss[0].save()\n models.sigirSynonymsSeedTemp.objects.filter(user=username).update(cattr=coraattr)\n else:\n models.patternSeedTemp.objects.create(seedsubstring=seedsstr,user=username)\n models.sigirSynonymsSeedTemp.objects.create(cattr=coraattr,user=username)\n substrings = models.sigirAttrExploration.objects.filter(is_labelled=False,user=username).order_by('orderscore').reverse()[\n 0:attrBufferPoolSize]\n #decay\n for item in substrings:\n if item.id not in multis:\n item.orderscore = item.orderscore*0.8\n item.save()\n for item in 
values:\n print(item.substring)\n val = models.sigirCoraAttrValue(attr_id=coraattr.id, value=item.substring, userid=data[0])\n val.save()\n corasyno = models.sigirCoraValueSynonym(value=val, synonym=item.substring, userid=data[0])\n corasyno.save()\n # if len(item.substring) < 4:\n # # check boundaries\n # print(item.substring)\n # llist = Cora_labeled.objects.filter(cleantext__icontains=' '+item.substring+' ')\n # else:\n # llist = Cora_labeled.objects.filter(text__icontains=item.substring)\n # llist = Cora_labeled.objects.filter(Q(cleantext__icontains=' ' + item.substring + ' ')| Q(cleantext__istartswith=item.substring + ' ')| Q(cleantext__iendswith=' ' + item.substring))\n # llist = dataset.filter(\n # Q(cleantext__icontains=' ' + item.substring + ' ') | Q(cleantext__istartswith=item.substring + ' ') | Q(\n # cleantext__iendswith=' ' + item.substring))\n #\n #\n # # restr = '
'+attra.attrname+''+attrsynonym+'
'\n # restr = '' + item.substring + '|' + coraattr.attrname + '.' + item.substring + ''\n llist = processhelper.datasetfilteringMultiContitiion(dataset=dataset, substring=item.substring)\n restr = processhelper.viewhelper(substring=item.substring, attrbute_name=coraattr.attrname, value=item.substring)\n for entiy in llist:\n entiy.labeledtext = entiy.labeledtext.replace(item.substring, restr)\n entiy.save()\n models.sigirCoraToAttrEntity.objects.create(cora_id=entiy.id, attrsynonym=corasyno,user=username)\n # item.delete()\n item.is_labelled = True\n item.save()\n # substrings = models.sigirAttrExploration.objects.all()[0:attrBufferPoolSize]\n substrings = models.sigirAttrExploration.objects.filter(is_labelled=False,user=username).order_by('orderscore').reverse()[0:attrBufferPoolSize]\n return render(request, 'sigir/attrexploration.html',{'attrIG':substrings})\n\n\ndef pattern_siblings(request):\n username = request.session['username']\n data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n if request.method == 'POST':\n attrbute_name = request.POST.get('attrname')\n print(attrbute_name)\n attr = models.sigirCoraAttr.objects.get(attrname=attrbute_name,userid=data[0])\n multis = request.POST.getlist(\"_selected_siblings\")\n print(multis)\n # a = [ str(item) for item in multis]\n # print(a)\n seedsstr = processhelper.list2str(seedslist=multis, deli='###')\n seedtemp = models.patternSeedTemp.objects.get(user=username)\n seedtemp.seedsubstring = seedsstr\n seedtemp.save()\n msg = []\n for substring in multis:\n val = models.sigirCoraAttrValue(attr_id=attr.id, value=substring.replace(' ','_'), userid=data[0])\n val.save()\n a = processhelper.updateUVDcontent(dataset=dataset, attra=val.attr, valueid=val.id, attrsynonym=substring,\n userid=data[0], username=username)\n msg.append(a)\n # corasyno = models.sigirCoraValueSynonym(value=val, synonym=substring, userid=data[0])\n # corasyno.save()\n # llist = processhelper.datasetfilteringMultiContitiion(dataset=dataset,substring=substring)\n # restr = processhelper.viewhelper(substring=substring,attrbute_name=attrbute_name,value=val.value)\n # for entiy in llist:\n # entiy.labeledtext = entiy.labeledtext.replace(substring, restr)\n # entiy.save()\n # models.sigirCoraToAttrEntity.objects.create(cora_id=entiy.id, attrsynonym=corasyno, user=username)\n # models.sigirAttrExploration.objects.filter(substring__in=multis,user=username).update(is_labelled=True)\n # msg = {\"attrname\": attrbute_name, 'selectedvalues': multis}\n models.dextraitems.objects.create(task=taskc, msg=json.dumps(msg),\n optype=dxaconstants.WorkerOperation.valueBind, user=data[0],\n samplingMethod=samplemth)\n\n matches = []\n seeds = models.patternSeedTemp.objects.filter(user=username)\n\n if seeds:\n seedslist = processhelper.str2list(seedstr=seeds[0].seedsubstring,deli='###')\n syn = models.sigirCoraValueSynonym.objects.filter(synonym=seedslist[0],userid=data[0])[0]\n attrname = syn.value.attr.attrname\n attrid = syn.value.attr_id\n values = models.sigirCoraAttrValue.objects.filter(attr_id=attrid,userid=data[0])\n osysn = models.sigirCoraValueSynonym.objects.filter(value_id__in=[ item.id for item in values])\n match = patternRecommendation.findCandidateSilbings(seedslist=seedslist,data=dataset)\n matches = list(set(match).difference(set([item.synonym for item in osysn])))\n return render(request, 'sigir/pattern_siblings.html',{'matches':matches,'attrname':attrname})\n\ndef pattern_synonyms(request):\n username = request.session['username']\n data = 
WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n if request.method == 'POST':\n pp = request.POST\n print(pp)\n keys = request.POST.get('kkeys')\n print(keys)\n msg = []\n if not keys:\n print(\"keys none\")\n else:\n print(keys)\n keylist = processhelper.str2list(seedstr=keys,deli='###')\n print(keylist)\n\n for k in keylist:\n multis = request.POST.getlist(k)\n print(k,'------>',multis)\n val = models.sigirCoraAttrValue.objects.filter(value=k,userid=data[0])[0]\n for item in multis:\n a = processhelper.updateUVDcontent(dataset=dataset,attra=val.attr,valueid=val.id,attrsynonym=item,userid=data[0],username=username)\n msg.append(a)\n models.dextraitems.objects.create(task=taskc, msg=json.dumps(msg),\n optype=dxaconstants.WorkerOperation.valueBind, user=data[0],\n samplingMethod=samplemth)\n\n dict = {}\n seeds = models.sigirSynonymsSeedTemp.objects.filter(user=username)\n if seeds:\n currentAttr = seeds[0].cattr\n dict = patternRecommendation.synonymsForCurrentAttr(data=dataset, currentAttr=currentAttr)\n keyss = '###'.join([k for k,v in dict.items()])\n return render(request, 'sigir/pattern_synonyms.html',{'dict':dict,'kkeys':keyss})\n\n\n\n\n\ndef add(request):\n username = request.session['username']\n data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n print(data[0])\n if request.method == 'POST':\n attrname = request.POST.get('attrname')\n scope = request.POST.get('is_local')\n print(attrname, scope)\n # models.sigirCoraAttr.objects.create(attrname=attrname, attrscope=scope, is_alive=1, userid=data[0])\n coraattr = models.sigirCoraAttr(attrname=attrname, attrscope=scope, is_alive=1, userid=data[0])\n coraattr.save()\n value = request.POST.get(\"attrvalue\").strip()\n attrsynonym = request.POST.get(\"attrsynonym\").strip()\n print(value, attrsynonym)\n if len(value) > 0:\n if len(attrsynonym) == 0:\n attrsynonym = value\n\n seedsstr = processhelper.list2str(seedslist=[attrsynonym], deli='###')\n processhelper.createUpdatePatternSeed(username=username, seedsstr=seedsstr, coraattr=coraattr)\n # val = models.sigirCoraAttrValue(attr_id=coraattr.id, value=value, userid=data[0])\n # val.save()\n #\n # corasyno = models.sigirCoraValueSynonym(value=val, synonym=attrsynonym, userid=data[0])\n # corasyno.save()\n #\n # # ss = models.patternSeedTemp.objects.filter(user=username)\n # #\n # # if ss:\n # # ss[0].seedsubstring = seedsstr\n # # ss[0].save()\n # # # seedtemp = models.patternSeedTemp.objects.get(user=username)\n # # # seedtemp.seedsubstring = seedsstr\n # # # seedtemp.save()\n # # models.sigirSynonymsSeedTemp.objects.filter(user=username).update(cattr=coraattr)\n # # else:\n # # models.patternSeedTemp.objects.create(seedsubstring=seedsstr, user=username)\n # # models.sigirSynonymsSeedTemp.objects.create(cattr=coraattr, user=username)\n #\n # # llist = Cora_labeled.objects.filter(\n # # Q(cleantext__icontains=' ' + attrsynonym + ' ') | Q(cleantext__istartswith=attrsynonym + ' ') | Q(\n # # cleantext__iendswith=' ' + attrsynonym))\n # # llist = dataset.filter(\n # # Q(cleantext__icontains=' ' + attrsynonym + ' ') | Q(cleantext__istartswith=attrsynonym + ' ') | Q(\n # # cleantext__iendswith=' ' + attrsynonym))\n # # if len(llist) == 0:\n # # llist = dataset.filter(text__icontains=attrsynonym)\n # # # restr = '
'+attra.attrname+''+attrsynonym+'
'\n # # restr = '' + attrsynonym + '|' + attrname + '.' + value + ''\n # llist = processhelper.datasetfilteringMultiContitiion(dataset=dataset, substring=attrsynonym)\n # restr = processhelper.viewhelper(substring=attrsynonym, attrbute_name=attrname, value=value)\n # for entiy in llist:\n # entiy.labeledtext = entiy.labeledtext.replace(attrsynonym, restr)\n # entiy.save()\n # models.sigirCoraToAttrEntity.objects.create(cora_id=entiy.id, attrsynonym=corasyno,user=username)\n # item = models.sigirAttrExploration.objects.filter(substring=value,user=username)\n # if item:\n # item[0].is_labelled = True\n # item[0].save()\n # msg = {\"attrname\": attrname, \"value\": value, \"synonym\": attrsynonym}\n # msg = processhelper.updateUVDcontent(dataset=dataset,attra=coraattr,value=value,attrsynonym=attrsynonym,userid=data[0],username=username)\n value = value.replace(' ','_')\n val = models.sigirCoraAttrValue(attr_id=coraattr.id, value=value, userid=data[0])\n val.save()\n msg = processhelper.updateUVDcontent(dataset=dataset, attra=coraattr, valueid=val.id, attrsynonym=attrsynonym,\n userid=data[0], username=username)\n\n models.dextraitems.objects.create(task=taskc, msg=json.dumps(msg),\n optype=dxaconstants.WorkerOperation.createAndBind, user=data[0],\n samplingMethod=samplemth)\n else:\n msg = {\"attrname\": attrname}\n WorkLog.objects.create(task=dxaconstants.ERTASK.Cora, operate_message=json.dumps(msg),\n operate_flag=dxaconstants.WorkerOperation.attributeCreate, operate_user=data[0])\n models.dextraitems.objects.create(task=taskc, msg=json.dumps(msg),\n optype=dxaconstants.WorkerOperation.attributeCreate, user=data[0],\n samplingMethod=samplemth)\n return render(request, 'sigir/add.html')\n\n\ndef addvalue(request):\n username = request.session['username']\n uids = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n if request.method == 'POST':\n print(uids[0])\n seleattr = request.POST.get(\"seleattr\")\n print(\"seleattr\")\n print(seleattr)\n value = request.POST.get(\"attrvalue\").strip()\n attrsynonym = request.POST.get(\"attrsynonym\").strip()\n print(value,attrsynonym)\n attra = models.sigirCoraAttr.objects.get(id=seleattr)\n\n # models.CoraAttrValue.objects.create(attr_id=seleattr,value=value,userid=uids[0])\n # val = models.sigirCoraAttrValue(attr_id=seleattr,value=value,userid=uids[0])\n # val.save()\n # # values = models.CoraAttrValue.objects.filter(value=value)\n # # print(values[0].id)\n # # models.CoraValueSynonym.objects.create(value=values[0],synonym=attrsynonym,userid=uids[0])\n # corasyno = models.sigirCoraValueSynonym(value=val,synonym=attrsynonym,userid=uids[0])\n # corasyno.save()\n # seedsstr = processhelper.list2str(seedslist=[attrsynonym], deli='###')\n # seedtemp = models.patternSeedTemp.objects.get(user=username)\n # seedtemp.seedsubstring = seedsstr\n # seedtemp.save()\n\n seedsstr = processhelper.list2str(seedslist=[attrsynonym], deli='###')\n processhelper.createUpdatePatternSeed(username=username,seedsstr=seedsstr,coraattr=attra)\n msg = {\"attr_id\":seleattr,\"value\":value,\"synonym\":attrsynonym}\n WorkLog.objects.create(task=dxaconstants.ERTASK.Cora,operate_message=json.dumps(msg), operate_flag=dxaconstants.WorkerOperation.valueBind,operate_user=uids[0])\n # llist = dataset.filter(Q(cleantext__icontains=' ' + attrsynonym + ' ')| Q(cleantext__istartswith=attrsynonym + ' ')|Q(cleantext__iendswith=' ' + attrsynonym ))\n # if len(llist)==0:\n # llist = dataset.filter(text__icontains=attrsynonym)\n # attra = 
models.sigirCoraAttr.objects.get(id=seleattr)\n # #restr = ''+attra.attrname+''+attrsynonym+'
'\n # restr = ''+ attrsynonym +'|' + attra.attrname+'.'+value + ''\n\n # llist = processhelper.datasetfilteringMultiContitiion(dataset=dataset, substring=attrsynonym)\n # restr = processhelper.viewhelper(substring=attrsynonym, attrbute_name=attra.attrname, value=value)\n # for entiy in llist:\n # entiy.labeledtext = entiy.labeledtext.replace(attrsynonym,restr)\n # entiy.save()\n # models.sigirCoraToAttrEntity.objects.create(cora_id=entiy.id,attrsynonym=corasyno)\n # item = models.sigirAttrExploration.objects.filter(substring=attrsynonym,user=username)\n # if item:\n # item[0].is_labelled = True\n # item[0].save()\n # msg = {\"attrname\": attra.attrname, \"value\": value, \"synonym\": attrsynonym}\n value = value.replace(' ','_')\n val = models.sigirCoraAttrValue(attr_id=attra.id, value=value, userid=uids[0])\n val.save()\n msg = processhelper.updateUVDcontent(dataset=dataset,attra=attra,valueid=val.id,attrsynonym=attrsynonym,userid=uids[0],username=username)\n models.dextraitems.objects.create(task=taskc, msg=json.dumps(msg),\n optype=dxaconstants.WorkerOperation.valueBind, user=uids[0],\n samplingMethod=samplemth)\n data = models.sigirCoraAttr.objects.filter(userid=uids[0])\n return render(request, 'sigir/addvalue.html', {'data': data})\n\n\ndef addsynonym(request):\n username = request.session['username']\n uids = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n if request.is_ajax():\n print(request.body)\n print(request.POST)\n # attrs = json.loads(request.body.decode(\"utf8\"))\n # print(attrs)\n # attr = attrs.get(\"attribute\")\n # print(attr)\n attribute = request.POST.get(\"attribute\")\n print(\"attriname\",attribute)\n test = request.POST.get(\"test\")\n print(\"test\",test) #ok\n test2 = request.POST.get(\"test2\")\n print(\"test2\",test2) #ok\n test3 = request.POST.get(\"test3\")\n print(\"test3\",test3)\n valuelist = models.sigirCoraAttrValue.objects.filter(attr_id=test).values('id','value')\n print(valuelist)\n htmlstr = \"\"\n\n for i in valuelist:\n print(i['id'],i['value'])\n htmlstr = htmlstr +\"\"\n # a = dict(list(valuelist))\n # print(a)\n # for k,v in a:\n # print(k,v)\n data = {'valuestr': htmlstr}\n\n\n # data = serializers.serialize(\"json\",valuelist)\n return HttpResponse(json.dumps(data))\n # return JsonResponse(valuelist,safe=False)\n\n if request.method == 'POST':\n value = request.POST.get(\"seleval\")\n print('value:',value)\n attrsynonym = request.POST.get(\"attrsynonym\").strip()\n print(attrsynonym)\n seleattr = request.POST.get(\"attrname\")\n attra = models.sigirCoraAttr.objects.get(id=seleattr)\n seedsstr = processhelper.list2str(seedslist=[attrsynonym], deli='###')\n processhelper.createUpdatePatternSeed(username=username, seedsstr=seedsstr, coraattr=attra)\n # # models.CoraValueSynonym.objects.create(value_id=value, synonym=attrsynonym, userid=uids[0])\n # corasyno = models.sigirCoraValueSynonym(value_id=value, synonym=attrsynonym, userid=uids[0])\n # corasyno.save()\n\n msg = {\"value_id\": value, \"synonym\": attrsynonym}\n WorkLog.objects.create(task=dxaconstants.ERTASK.Cora, operate_message=json.dumps(msg),\n operate_flag=dxaconstants.WorkerOperation.valueBind, operate_user=uids[0])\n # llist = Cora_labeled.objects.filter(labeledtext__icontains=attrsynonym)\n # llist = Cora_labeled.objects.filter(Q(cleantext__icontains=' ' + attrsynonym + ' ')| Q(cleantext__istartswith=attrsynonym + ' ')|Q(cleantext__iendswith=' ' + attrsynonym ))\n # if len(llist) == 0:\n # llist = Cora_labeled.objects.filter(text__icontains= 
attrsynonym)\n\n # #restr = ''+attra.attrname+''+attrsynonym+'
'\n # restr = '' + attrsynonym + '|' + attra.attrname + '.' + corasyno.value.value + ''\n # llist = processhelper.datasetfilteringMultiContitiion(dataset=dataset, substring=attrsynonym)\n # restr = processhelper.viewhelper(substring=attrsynonym, attrbute_name=attra.attrname, value=corasyno.value.value)\n # for entiy in llist:\n # entiy.labeledtext = entiy.labeledtext.replace(attrsynonym,restr)\n # entiy.save()\n # models.sigirCoraToAttrEntity.objects.create(cora_id=entiy.id, attrsynonym=corasyno)\n # item = models.sigirAttrExploration.objects.filter(substring=attrsynonym,user=username)\n # if item:\n # item[0].is_labelled = True\n # item[0].save()\n # msg = {\"attrname\": attra.attrname, \"value\": corasyno.value.value, \"synonym\": attrsynonym}\n msg = processhelper.updateUVDcontent(dataset=dataset,attra=attra,valueid=value,attrsynonym=attrsynonym,userid=uids[0],username=username)\n models.dextraitems.objects.create(task=taskc, msg=json.dumps(msg),\n optype=dxaconstants.WorkerOperation.valueBind, user=uids[0],\n samplingMethod=samplemth)\n data = models.sigirCoraAttr.objects.filter(userid=uids[0])\n return render(request, 'sigir/addsynonym.html', {'data': data})\n\n\n\n\n\n\n\ndef searchsampling(request):\n if request.method == 'POST':\n username = request.session['username']\n current = alloymodels.CoraTemp.objects.get(id=1)\n # cora = Cora.objects.get(id=current.currentCora_id)\n submittype = request.POST.get(\"submit\")\n print(submittype)\n if submittype == \"Save and check another\":\n # pseedid = request.POST.get(\"pseedid\")\n # print(pseedid)\n # cora = Cora.objects.get(id=pseedid)\n print(\"Save and check another\")\n multis = request.POST.getlist(\"Records\")\n print(multis)\n multitems = models.MultiItems.objects.get(seedid=current.currentCora_id,is_checked=-1)\n multitems.selectedidset = str(multis)\n multitems.is_checked = 1\n multitems.save()\n coun = models.MultiItems.objects.filter(is_checked=1,samplingMethod=samplemth).count()\n if coun > workeroperatiionNum/2:\n print(coun)\n return render(request, 'sigir/exploreperformance.html')\n elif submittype == \"submitKeywords\":\n # pseedid = request.POST.get(\"pseedid\")\n # print(pseedid)\n # cora = Cora.objects.get(id=pseedid)\n\n print(\"submitKeywords\")\n seed1kw = request.POST.get(\"seed1kw\")\n print(seed1kw)\n if seed1kw:\n print(True)\n similar = headcast.searchtfidf(seedid=current.currentCora_id, kw=seed1kw, num=9)\n # similar = searchmh(kw=seed1kw, seedid=current.currentCora_id, num=9)\n print(similar)\n data = {'similar': similar, 'seed': current.currentCora}\n print(data)\n simiids = [item.id for item in similar]\n models.MultiItems.objects.create(seedid=current.currentCora_id,text1=current.currentCora.text,keywords=seed1kw, candidateidset=str(simiids),samplingMethod=samplemth,task=dxaconstants.ERTASK.Cora,user=username)\n return render(request, 'sigir/records_searchsampling.html', data)\n elif submittype == \"Save\":\n print(\"save\")\n else:\n print(\"Replace\")\n # crowdCora.objects.create(cora_id=focusedentities[fc], testsystem=dxaconstants.TestSystems.DEXTRA,\n # clusterid=cluid)\n fc = 0\n seed = Cora.objects.order_by('?')[:1]\n print(seed)\n print(seed[fc].id)\n current = alloymodels.CoraTemp.objects.get(id=1)\n print(current.currentCora)\n current.currentCora = seed[fc]\n current.save()\n print(current.currentCora_id)\n data = {'seed': current.currentCora}\n print(\"seed:\")\n print(seed[fc].id)\n print(seed[fc].text)\n print(\"current:\")\n print(current.currentCora_id)\n return render(request, 
'sigir/records_searchsampling.html',data)\n\n\n\ndef record_uncertainsampling(request):\n\n # To train dedupe, we feed it a sample of records.\n # deduper.sample(temp_d)\n # If we have training data saved from a previous run of dedupe,\n # look for it an load it in.\n # __Note:__ if you want to train from scratch, delete the training_file\n if os.path.exists(training_file):\n print('reading labeled examples from ', training_file)\n with open(training_file, 'rb') as f:\n deduper.readTraining(f)\n username = request.session['username']\n if request.method == 'POST':\n is_same = request.POST.get('is_same')\n print(is_same)\n unpairid = request.POST.get(\"unpairid\")\n print(unpairid)\n unpair = models.piars.objects.get(id=unpairid)\n unpair.is_same = is_same\n unpair.save()\n examples = {'distinct': [], 'match': []}\n d1 = dict([('text', unpair.text1), ('id', str(unpair.id1))])\n d2 = dict([('text', unpair.text2), ('id', str(unpair.id2))])\n record_pair = (d1,d2)\n uids = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n msg = {\"unpair_id1\": unpair.id1, \"unpair_id2\": unpair.id2, \"is_same\": is_same}\n WorkLog.objects.create(task=dxaconstants.ERTASK.Cora, operate_message=json.dumps(msg),\n operate_flag=dxaconstants.WorkerOperation.pairJudge, operate_user=uids[0],\n operate_system=dxaconstants.TestSystems.SigirExpriment)\n if is_same == 1:\n examples['match'].append(record_pair)\n elif is_same == 0:\n examples['distinct'].append(record_pair)\n deduper.markPairs(examples)\n match = models.piars.objects.filter(is_same=1, user=username,samplingMethod=dxaconstants.RecordSamplingMethod.UncertainSampling)\n n_match = match.count()\n print('match',n_match)\n different = models.piars.objects.filter(is_same=0, user=username, samplingMethod=dxaconstants.RecordSamplingMethod.UncertainSampling)\n n_different = different.count()\n print('different',n_different)\n if n_match + n_different < workeroperatiionNum:\n upair = deduper.uncertainPairs()\n\n return render(request, 'sigir/records_uncertainsampling.html',\n {\"record1\": upair[0][0], \"record2\": upair[0][1], 'unid': unpair.id, 'n_match': n_match,\n 'n_different': n_different})\n else:\n examples = {'distinct': [], 'match': []}\n for mitem in match:\n d1 = dict([('text', mitem.text1), ('id', str(mitem.id1))])\n d2 = dict([('text', mitem.text2), ('id', str(mitem.id2))])\n record_pair = (d1, d2)\n examples['match'].append(record_pair)\n for ditem in different:\n d1 = dict([('text', ditem.text1), ('id', str(ditem.id1))])\n d2 = dict([('text', ditem.text2), ('id', str(ditem.id2))])\n record_pair = (d1, d2)\n # record_pair = dict([('text', ditem.text1), ('id', ditem.id1)]) + dict(\n # [('text', ditem.text2), ('id', ditem.id2)])\n print(record_pair)\n examples['distinct'].append(record_pair)\n print(examples)\n deduper.markPairs(examples)\n deduper.train()\n print('train done')\n\n # When finished, save our training away to disk\n with open(training_file, 'w') as tf:\n deduper.writeTraining(tf)\n # Save our weights and predicates to disk. 
If the settings file\n # exists, we will skip all the training and learning next time we run\n # this file.\n with open(settings_file, 'wb') as sf:\n deduper.writeSettings(sf)\n threshold = deduper.threshold(temp_d, recall_weight=1)\n print('clustering...')\n clustered_dupes = deduper.match(temp_d, threshold)\n\n print('# duplicate sets', len(clustered_dupes))\n cluster_membership = {}\n cluster_id = 0\n print(clustered_dupes)\n\n for (cluster_id, cluster) in enumerate(clustered_dupes):\n id_set, scores = cluster\n print(id_set)\n cluster_d = [temp_d[c] for c in id_set]\n print(cluster_d)\n # canonical_rep = dedupe.canonicalize(cluster_d)\n for record_id, score in zip(id_set, scores):\n cluster_membership[record_id] = {\n \"cluster id\": cluster_id,\n # \"canonical representation\": canonical_rep,\n \"confidence\": score\n\n }\n models.CoraPerformanceLog.objects.create(explorationMethod=dxaconstants.RecordSamplingMethod.UncertainSampling,\n clusterid=cluster_id, cora_id=record_id,\n confidence=score,workerOperationNum=workeroperatiionNum)\n\n singleton_id = cluster_id + 1\n print(cluster_membership)\n cora_true = models.CoraPerformanceLog.objects.filter(explorationMethod='groundtruth').order_by('cora_id')\n print(cora_true)\n aa = [item.clusterid for item in cora_true]\n for item in cora_true:\n coraid = item.cora_id\n if coraid in cluster_membership:\n print(coraid)\n else:\n singleton_id += 1\n models.CoraPerformanceLog.objects.create(explorationMethod=dxaconstants.RecordSamplingMethod.UncertainSampling, clusterid=singleton_id, cora_id=coraid,confidence=0,workerOperationNum=workeroperatiionNum)\n\n # a = clusterCora.objects.filter(user=username,is_checked=-1).aggregate(Min('clusterid'))\n # datas = clusterCora.objects.filter(clusterid=a['clusterid__min'])\n # print(datas)\n # sett = set(item.cora_id for item in datas)\n # coras = Cora.objects.filter(id__in=sett)\n # return render(request, 'dedupe/clusterreview.html',{'data':coras,'clusterid':a['clusterid__min']})\n cora_true = models.CoraPerformanceLog.objects.filter(explorationMethod='groundtruth').order_by('cora_id')\n print(cora_true)\n aa_true = [item.clusterid for item in cora_true]\n cluster_membership = models.CoraPerformanceLog.objects.filter(\n explorationMethod='RecordSamplingMethod.UncertainSampling',workerOperationNum=workeroperatiionNum).order_by('cora_id')\n bb_pred = [item.clusterid for item in cluster_membership]\n nmi = performancemeasure.clusterNMI(y_true=aa_true, y_pred=bb_pred)\n dict = performancemeasure.clusterMeasureSet(aa_true=aa_true, bb_pred=bb_pred,\n RecordSamplingMethod=samplemth,\n workerOperationNum=workeroperatiionNum)\n return render(request,'sigir/exploreperformance.html',dict)\n\n\n\n upair = deduper.uncertainPairs()\n print(upair)\n print(upair[0][0])\n print(upair[0][1])\n unpair = models.piars(id1=upair[0][0]['id'],id2=upair[0][1]['id'],text1=upair[0][0]['text'],\n text2=upair[0][1]['text'],task=dxaconstants.ERTASK.Cora,user=username,samplingMethod=dxaconstants.RecordSamplingMethod.UncertainSampling)\n unpair.save()\n n_match = models.piars.objects.filter(is_same=1, user=username, samplingMethod=samplemth).count()\n n_different = models.piars.objects.filter(is_same=0, user=username, samplingMethod=samplemth).count()\n return render(request,'sigir/records_uncertainsampling.html',{\"record1\": upair[0][0], \"record2\": upair[0][1],\n 'unid': unpair.id,'n_match':n_match,'n_different':n_different})\n # return render(request, 'dedupe/activelabel.html', {\"upair\": upair[0]})\n\n\ndef 
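performancemeasure.clusterNMI above scores the predicted clustering against the ground-truth labels. Assuming it wraps the standard normalized mutual information, the equivalent scikit-learn call is:

from sklearn.metrics import normalized_mutual_info_score

y_true = [0, 0, 1, 1, 2]
y_pred = [1, 1, 0, 0, 2]  # same partition under different cluster ids
print(normalized_mutual_info_score(y_true, y_pred))  # 1.0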
record_randomsampling(request):\n\n # To train dedupe, we feed it a sample of records.\n # deduper.sample(temp_d)\n # If we have training data saved from a previous run of dedupe,\n # look for it an load it in.\n # __Note:__ if you want to train from scratch, delete the training_file\n # if os.path.exists(training_file):\n # print('reading labeled examples from ', training_file)\n # with open(training_file, 'rb') as f:\n # deduper.readTraining(f)\n username = request.session['username']\n if request.method == 'POST':\n is_same = request.POST.get('is_same')\n print(is_same)\n unpairid = request.POST.get(\"unpairid\")\n print(unpairid)\n unpair = models.piars.objects.get(id=unpairid)\n unpair.is_same = is_same\n unpair.save()\n examples = {'distinct': [], 'match': []}\n d1 = dict([('text', unpair.text1), ('id', str(unpair.id1))])\n d2 = dict([('text', unpair.text2), ('id', str(unpair.id2))])\n record_pair = (d1,d2)\n uids = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n msg = {\"unpair_id1\": unpair.id1, \"unpair_id2\": unpair.id2, \"is_same\": is_same}\n WorkLog.objects.create(task=dxaconstants.ERTASK.Cora, operate_message=json.dumps(msg),\n operate_flag=dxaconstants.WorkerOperation.pairJudge, operate_user=uids[0],\n operate_system=dxaconstants.TestSystems.SigirExpriment)\n if is_same == 1:\n examples['match'].append(record_pair)\n elif is_same == 0:\n examples['distinct'].append(record_pair)\n deduper.markPairs(examples)\n match = models.piars.objects.filter(is_same=1, user=username,samplingMethod=samplemth)\n n_match = match.count()\n print('match',n_match)\n different = models.piars.objects.filter(is_same=0, user=username, samplingMethod=samplemth)\n n_different = different.count()\n print('different',n_different)\n if n_match + n_different < workeroperatiionNum:\n randpair = Cora.objects.order_by('?')[:2]\n unpair = models.piars(id1=randpair[0].id, id2=randpair[1].id, text1=randpair[0].text,\n text2=randpair[1].text, task=dxaconstants.ERTASK.Cora, user=username,\n samplingMethod=samplemth)\n unpair.save()\n return render(request, 'sigir/records_randomsampling.html',\n {\"record1\": randpair[0], \"record2\": randpair[1],\n 'unid': unpair.id, 'n_match': n_match, 'n_different': n_different})\n else:\n examples = {'distinct': [], 'match': []}\n for mitem in match:\n d1 = dict([('text', mitem.text1), ('id', str(mitem.id1))])\n d2 = dict([('text', mitem.text2), ('id', str(mitem.id2))])\n record_pair = (d1, d2)\n examples['match'].append(record_pair)\n for ditem in different:\n d1 = dict([('text', ditem.text1), ('id', str(ditem.id1))])\n d2 = dict([('text', ditem.text2), ('id', str(ditem.id2))])\n record_pair = (d1, d2)\n # record_pair = dict([('text', ditem.text1), ('id', ditem.id1)]) + dict(\n # [('text', ditem.text2), ('id', ditem.id2)])\n print(record_pair)\n examples['distinct'].append(record_pair)\n print(examples)\n deduper.markPairs(examples)\n deduper.train()\n print('train done')\n # blocking.blockingsql()\n\n # print(deduper.blocker.index_fields)\n #\n # for field in deduper.blocker.index_fields:\n # print(field)\n # # field_data = set(record[field] for record in temp_d)\n # # print(temp_d)\n # # for record in temp_d:\n # # print(record)\n # # print(temp_d[record][field])\n # field_data = set(temp_d[record][field] for record in temp_d)\n # print(field_data)\n # deduper.blocker.index(data=field_data, field=field)\n # # deduper.blocker.indexAll(temp_d)\n # block_ids = deduper.blocker(temp_d, target=False)\n # print(\"block_ids\")\n # print(block_ids)\n\n # 
When finished, save our training away to disk\n with open(training_file, 'w') as tf:\n deduper.writeTraining(tf)\n # Save our weights and predicates to disk. If the settings file\n # exists, we will skip all the training and learning next time we run\n # this file.\n with open(settings_file, 'wb') as sf:\n deduper.writeSettings(sf)\n threshold = deduper.threshold(temp_d, recall_weight=1)\n print('clustering...')\n clustered_dupes = deduper.match(temp_d, threshold)\n\n print('# duplicate sets', len(clustered_dupes))\n cluster_membership = {}\n cluster_id = 0\n print(clustered_dupes)\n\n for (cluster_id, cluster) in enumerate(clustered_dupes):\n id_set, scores = cluster\n print(id_set)\n cluster_d = [temp_d[c] for c in id_set]\n print(cluster_d)\n # canonical_rep = dedupe.canonicalize(cluster_d)\n for record_id, score in zip(id_set, scores):\n cluster_membership[record_id] = {\n \"cluster id\": cluster_id,\n # \"canonical representation\": canonical_rep,\n \"confidence\": score\n\n }\n models.CoraPerformanceLog.objects.create(explorationMethod=samplemth,\n clusterid=cluster_id, cora_id=record_id,\n confidence=score,workerOperationNum=workeroperatiionNum)\n\n singleton_id = cluster_id + 1\n print(cluster_membership)\n cora_true = models.CoraPerformanceLog.objects.filter(explorationMethod='groundtruth').order_by('cora_id')\n print(cora_true)\n aa = [item.clusterid for item in cora_true]\n for item in cora_true:\n coraid = item.cora_id\n if coraid in cluster_membership:\n print(coraid)\n else:\n singleton_id += 1\n models.CoraPerformanceLog.objects.create(explorationMethod=samplemth, clusterid=singleton_id, cora_id=coraid,confidence=0,workerOperationNum=workeroperatiionNum)\n\n # a = clusterCora.objects.filter(user=username,is_checked=-1).aggregate(Min('clusterid'))\n # datas = clusterCora.objects.filter(clusterid=a['clusterid__min'])\n # print(datas)\n # sett = set(item.cora_id for item in datas)\n # coras = Cora.objects.filter(id__in=sett)\n # return render(request, 'dedupe/clusterreview.html',{'data':coras,'clusterid':a['clusterid__min']})\n cora_true = models.CoraPerformanceLog.objects.filter(explorationMethod='groundtruth').order_by('cora_id')\n print(cora_true)\n aa_true = [item.clusterid for item in cora_true]\n cluster_membership = models.CoraPerformanceLog.objects.filter(\n explorationMethod=samplemth,workerOperationNum=workeroperatiionNum).order_by('cora_id')\n bb_pred = [item.clusterid for item in cluster_membership]\n nmi = performancemeasure.clusterNMI(y_true=aa_true, y_pred=bb_pred)\n dict = performancemeasure.clusterMeasureSet(aa_true=aa_true,bb_pred=bb_pred,RecordSamplingMethod=samplemth,workerOperationNum=workeroperatiionNum)\n return render(request,'sigir/exploreperformance.html',dict)\n\n randpair = Cora.objects.order_by('?')[:2]\n print(randpair)\n for item in randpair:\n print(item)\n print(item.id)\n\n\n print(randpair[0].id)\n print(randpair[1].id)\n upair = deduper.uncertainPairs()\n # print(upair)\n # # print(upair[0][0])\n # # print(upair[0][1])\n unpair = models.piars(id1=randpair[0].id,id2=randpair[1].id,text1=randpair[0].text,\n text2=randpair[1].text,task=dxaconstants.ERTASK.Cora,user=username,samplingMethod=samplemth)\n unpair.save()\n n_match = models.piars.objects.filter(is_same=1, user=username,samplingMethod=samplemth).count()\n n_different = models.piars.objects.filter(is_same=0, user=username,samplingMethod=samplemth).count()\n return render(request,'sigir/records_uncertainsampling.html',{\"record1\": randpair[0], \"record2\": randpair[1],\n 'unid': 
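The two views above drive dedupe's active-learning cycle one HTTP request at a time: fetch a pair, store the worker's verdict, replay it through markPairs, and train once enough labels accumulate. The same cycle in plain console form, assuming the dedupe API the views call (uncertainPairs, markPairs, train); label_loop is an illustrative name:

def label_loop(deduper, rounds=10):
    # One console round-trip per uncertain pair, mirroring the POST handler above.
    for _ in range(rounds):
        pair = deduper.uncertainPairs()[0]  # most informative unlabeled pair
        answer = input('Same record? [y/n] ')
        examples = {'match': [], 'distinct': []}
        examples['match' if answer == 'y' else 'distinct'].append(pair)
        deduper.markPairs(examples)
    deduper.train()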
unpair.id,'n_match':n_match,'n_different':n_different})\n\n # return render(request, 'dedupe/activelabel.html', {\"upair\": upair[0]})\n","repo_name":"Sayarara/pyweb","sub_path":"sigirexperiments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":63520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35915335222","text":"import socket\nimport numpy as np\nimport encodings\nimport time\nfrom time import sleep\nimport math\nimport smbus\n\n\nPWR_MGMT_1 = 0x6B\nSMPLRT_DIV = 0x19\nCONFIG = 0x1A\nGYRO_CONFIG = 0x1B\nACCEL_CONFIG = 0x1C\nINT_ENABLE = 0x38\nACCEL_XOUT_H = 0x3B\nACCEL_YOUT_H = 0x3D\nACCEL_ZOUT_H = 0x3F\nGYRO_XOUT_H = 0x43\nGYRO_YOUT_H = 0x45\nGYRO_ZOUT_H = 0x47\n \nDevice_Address = 0x68\nmultiplexer_address = 0x70\n \nI2C_ch = [0B00000001,\n 0B00000010,\n 0B00000100,\n 0B00001000,\n 0B00010000,\n 0B00100000,\n 0B01000000,\n 0B10000000]\n \ngyroOffsetX={}\ngyroOffsetY={}\ngyroOffsetZ={}\n\nangX={}\nangY={}\nangZ={}\n\n\nip='192.168.1.107'\nport=5005\n\nbus = smbus.SMBus(1)\n\nbuffer=20\nModule_Quantity = 6\ninterval= 0\npreinterval=time.time()\n\n\ndef MPU_Init():\n bus.write_byte_data(Device_Address, SMPLRT_DIV, 0x07)\n \n bus.write_byte_data(Device_Address, PWR_MGMT_1, 0x01)\n \n bus.write_byte_data(Device_Address, CONFIG, 0x0)\n \n bus.write_byte_data(Device_Address, GYRO_CONFIG, 0x08)\n \n bus.write_byte_data(Device_Address, ACCEL_CONFIG, 0x00)\n \n bus.write_byte_data(Device_Address, INT_ENABLE, 0x01)\n\ndef read_raw_data(addr):\n high = bus.read_byte_data(Device_Address, addr)\n low = bus.read_byte_data(Device_Address, addr+1)\n \n value = ((high << 8) | low)\n \n if(value > 32768):\n value = value - 65536\n return value\n \ndef dist(a,b):\n return math.sqrt((a*a)+(b*b))\n\ndef calibration(n):\n Gx=0\n Gy=0\n Gz=0\n for i in range(3000):\n gyro_x = read_raw_data(GYRO_XOUT_H)\n gyro_y = read_raw_data(GYRO_YOUT_H)\n gyro_z = read_raw_data(GYRO_ZOUT_H)\n \n Gx += gyro_x/65.5\n Gy += gyro_y/65.5\n Gz += gyro_z/65.5\n \n print(i)\n gyroOffsetX[n]=Gx/3000\n gyroOffsetY[n]=Gy/3000\n gyroOffsetZ[n]=Gz/3000\n\n \n\n\nfor n in range(2):\n bus.write_byte(multiplexer_address, I2C_ch[n])\n MPU_Init()\n calibration(n)\n angX[n]=0\n angY[n]=0\n angZ[n]=0\n \n \nprint(gyroOffsetX,gyroOffsetY,gyroOffsetZ)\n\n\n\ns=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((ip, port))\ns.listen(1)\nprint(\"Server Started waiting for client to connect \")\nconn, addr = s.accept()\nprint('Connnection address: ', addr)\ni=0\nj=0\nwhile 1:\n for n in range(2):\n print(n)\n bus.write_byte(multiplexer_address, I2C_ch[n])\n \n acc_x = read_raw_data(ACCEL_XOUT_H)\n acc_y = read_raw_data(ACCEL_YOUT_H)\n acc_z = read_raw_data(ACCEL_ZOUT_H)\n \n gyro_x = read_raw_data(GYRO_XOUT_H)\n gyro_y = read_raw_data(GYRO_YOUT_H)\n gyro_z = read_raw_data(GYRO_ZOUT_H)\n \n Ax = acc_x/16384.0\n Ay = acc_y/16384.0\n Az = acc_z/16384.0\n \n Gx = gyro_x/65.5\n Gy = gyro_y/65.5\n Gz = gyro_z/65.5\n \n Gx-=gyroOffsetX[n]\n Gy-=gyroOffsetY[n]\n Gz-=gyroOffsetZ[n]\n \n radians = math.atan2(Ax, dist(Ay,Az))\n y_rot=-math.degrees(radians)\n \n radians = math.atan2(Ay, dist(Ax,Az))\n x_rot=math.degrees(radians)\n \n \n interval= time.time()-preinterval\n angX[n] = 0.96*(angX[n]+Gx*interval)+0.04*x_rot\n angY[n] = 0.96*(angY[n]+Gy*interval)+0.04*y_rot\n angZ[n] += Gz*interval\n preinterval=time.time()\n \n my_data = \"{},{},{},{};\".format(n,angX[n],angY[n],angZ[n])\n print(my_data)\n if (n==0):\n j+=1\n print(\"Per model: \", j)\n i+=1\n 
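The angle update inside the loop above (0.96 times the gyro-integrated angle plus 0.04 times the accelerometer tilt) is a complementary filter: gyro integration is smooth but drifts, and the small accelerometer weight anchors it. Isolated as a sketch:

def complementary_filter(angle, gyro_rate, accel_angle, dt, alpha=0.96):
    # Gyro integration is smooth but drifts; the accel term anchors it.
    return alpha * (angle + gyro_rate * dt) + (1 - alpha) * accel_angle

angle = 0.0
angle = complementary_filter(angle, gyro_rate=1.5, accel_angle=0.2, dt=0.01)
print(angle)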
print(\"Total: \", i)\n x_encoded_data = my_data.encode('utf-8')\n conn.sendall(x_encoded_data)\n \nconn.close()\n\n","repo_name":"blackduffelbag/diplom","sub_path":"module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74263013687","text":"import sql_function as sql\r\nimport json\r\n\r\ndef delete_management(request, self):\r\n if(request[\"type\"] == \"private\"):\r\n filename = request[\"file_name\"]\r\n username = request[\"username\"]\r\n result = sql.delete_file(filename,username)\r\n if(result == 1):\r\n reply = {\r\n \"success\": True\r\n }\r\n else:\r\n reply = {\r\n \"success\": False\r\n }\r\n self.request.send(json.dumps(reply).encode())\r\n elif(request[\"type\"] == \"group\"):\r\n group_id = request[\"groupname\"]\r\n filename = request[\"file_name\"]\r\n result = sql.delete_group_file(group_id,filename)\r\n if(result == 1):\r\n reply = {\r\n \"success\": True\r\n }\r\n else:\r\n reply = {\r\n \"success\": False\r\n }\r\n self.request.send(json.dumps(reply).encode())\r\n return 1\r\n","repo_name":"SY17900/School_Work","sub_path":"netdisk/netdisk_server/delete_function.py","file_name":"delete_function.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40159606526","text":"class Solution:\n def findKthLargest(self, nums, k):\n def swap(nums, i, j):\n tmp = nums[i]\n nums[i] = nums[j]\n nums[j] = tmp\n def partition(nums, left, right):\n org_left = left\n org_right = right\n idx = nums[org_left]\n left += 1\n while left <= right:\n while left <= right and nums[right] <= idx:\n right -= 1\n while left <= right and nums[left] >= idx:\n left += 1\n if right < left:\n break\n swap(nums, left, right)\n swap(nums, org_left, right)\n return right\n def quick_select():\n left = 0\n right = len(nums) - 1\n while True:\n pos = partition(nums, left, right)\n if pos == k-1:\n return nums[pos]\n elif pos > k-1:\n right = pos - 1\n else:\n left = pos + 1\n return quick_select()\n\n\nprint(Solution().findKthLargest([3,2,1,5,6,4] , 2))\n\n","repo_name":"luoyanhan/Algorithm-and-data-structure","sub_path":"Leetcode/medium/215.py","file_name":"215.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22516953627","text":"#!/usr/bin/env python\n\nfrom typing import List\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:\n in_order_map = {v: i for i, v in enumerate(inorder)}\n\n def build(in_s, in_e, post_e):\n if in_s > in_e: return\n if in_s == in_e: return TreeNode(inorder[in_s])\n root_v = postorder[post_e]\n pivot = in_order_map[root_v] # 根节点在inorder中的位置\n r = in_e - pivot\n\n root = TreeNode(root_v)\n root.left = build(in_s, pivot - 1, post_e - r - 1)\n root.right = build(pivot + 1, in_e, post_e - 1)\n return root\n\n n = len(inorder)\n return build(0, n-1, n-1)","repo_name":"ftakanashi/JobProjects","sub_path":"LeetCode/106.从中序与后序遍历序列构造二叉树/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"5082767798","text":"from 
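The quickselect in 215.py above partitions so that values left of the pivot are greater than or equal to it, landing the answer in O(n) average time. For a cross-check, the standard-library heap version gives the same result on the record's own test input:

import heapq

def find_kth_largest(nums, k):
    return heapq.nlargest(k, nums)[-1]

assert find_kth_largest([3, 2, 1, 5, 6, 4], 2) == 5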
django.contrib.contenttypes.models import ContentType\n\nfrom toolbox.models import Subsets\nfrom bemas.models import Complaint, Organization, Originator, Sector, Status, TypeOfImmission\nfrom .base import DefaultViewTestCase\nfrom .constants_vars import VALID_POINT_DB\n\n\n#\n# general views\n#\n\nclass IndexViewTest(DefaultViewTestCase):\n \"\"\"\n test class for main page\n \"\"\"\n\n def setUp(self):\n self.init()\n\n def test_view_no_rights(self):\n self.generic_view_test(\n False, False, 'index', None, 200,\n 'text/html; charset=utf-8', 'keine Rechte'\n )\n\n def test_view_standard_rights(self):\n self.generic_view_test(\n True, False, 'index', None, 200,\n 'text/html; charset=utf-8', 'Codelisten'\n )\n\n\nclass MapViewTest(DefaultViewTestCase):\n \"\"\"\n test class for map page\n \"\"\"\n\n @classmethod\n def setUpTestData(cls):\n organization = Organization.objects.create(\n name='N1YvcbxM'\n )\n sector = Sector.objects.first()\n originator = Originator.objects.create(\n sector=sector,\n operator_organization=organization,\n description='NutMoxfw',\n emission_point=VALID_POINT_DB\n )\n status = Status.get_default_status()\n type_of_immission = TypeOfImmission.objects.first()\n complaint = Complaint.objects.create(\n status=status,\n type_of_immission=type_of_immission,\n immission_point=VALID_POINT_DB,\n originator=originator,\n description='e506TjLt'\n )\n Subsets.objects.create(\n model=ContentType.objects.filter(\n app_label='bemas',\n model='originator'\n ).first(),\n pk_field='id',\n pk_values=[\n originator.pk\n ]\n )\n Subsets.objects.create(\n model=ContentType.objects.filter(\n app_label='bemas',\n model='complaint'\n ).first(),\n pk_field='id',\n pk_values=[\n complaint.pk\n ]\n )\n\n def setUp(self):\n self.init()\n\n def test_view_no_rights(self):\n self.generic_view_test(\n False, False, 'map', None, 200,\n 'text/html; charset=utf-8', 'keine Rechte'\n )\n\n def test_view_standard_rights(self):\n self.generic_view_test(\n True, False, 'map', None, 200,\n 'text/html; charset=utf-8', 'Immissions- und Emissionsorte'\n )\n\n def test_view_originator_subset_no_rights(self):\n self.generic_view_test(\n False, False, 'map_model_subset', ['originator', 1], 200,\n 'text/html; charset=utf-8', 'keine Rechte'\n )\n\n def test_view_originator_subset_standard_rights(self):\n self.generic_view_test(\n True, False, 'map_model_subset', ['originator', 1], 200,\n 'text/html; charset=utf-8', 'Kartendaten angezeigt'\n )\n\n def test_view_complaint_subset_no_rights(self):\n self.generic_view_test(\n False, False, 'map_model_subset', ['complaint', 1], 200,\n 'text/html; charset=utf-8', 'keine Rechte'\n )\n\n def test_view_complaint_subset_standard_rights(self):\n self.generic_view_test(\n True, False, 'map_model_subset', ['complaint', 1], 200,\n 'text/html; charset=utf-8', 'Kartendaten angezeigt'\n )\n\n\nclass OrphanedDataViewTest(DefaultViewTestCase):\n \"\"\"\n test class for orphaned data page\n \"\"\"\n\n def setUp(self):\n self.init()\n\n def test_view_no_rights(self):\n self.generic_view_test(\n False, False, 'orphaned_data', None, 200,\n 'text/html; charset=utf-8', 'keine Rechte'\n )\n\n def test_view_standard_rights(self):\n self.generic_view_test(\n True, False, 'orphaned_data', None, 200,\n 'text/html; charset=utf-8', 'keine verwaisten'\n 
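The test classes above route every assertion through the project's generic_view_test helper (expected status, content type, marker substring). Stripped of that harness, the underlying Django check reduces to something like the sketch below; the class name and URL here are generic, not from the datenwerft project:

from django.test import TestCase

class IndexViewSmokeTest(TestCase):
    def test_index_renders(self):
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertIn('text/html', response['Content-Type'])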
)\n","repo_name":"rostock/datenwerft","sub_path":"bemas/tests/tests_general.py","file_name":"tests_general.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"22092374855","text":"import sys\nimport serial\nfrom virtual_controller import VirtualController\n\n\nclass Controller(VirtualController):\n\n def __init__(self, device):\n VirtualController.__init__(self)\n self.serial = serial.Serial(device)\n\n def _write(self, cmd):\n if sys.version_info[0] == 2:\n self.serial.write(cmd)\n else:\n self.serial.write(bytes(cmd, 'latin-1'))\n\n","repo_name":"CedarStreetGarage/python-joystick-servo-stepper-controller","sub_path":"src/control/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21326451096","text":"import json\nimport qrcode\n\nfrom pytest \t\timport approx\nfrom fpdf\t\timport FPDF, FlexTemplate\n\nfrom .layout\t\timport Region, Text, Image, Box, Coordinate\nfrom .defaults\t\timport MM_IN\n\n\ndef test_Region():\n card_size\t\t\t= Coordinate( y=2+1/4, x=3+3/8 )\n card_margin \t\t= 1/8\n card\t\t\t= Box( 'card', 0, 0, card_size.x, card_size.y )\n #print( card )\n card_interior\t\t= card.add_region_relative(\n Region( 'card-interior', x1=+card_margin, y1=+card_margin, x2=-card_margin, y2=-card_margin )\n )\n #print( card_interior )\n assert card_interior.x1 == card_margin\n assert card_interior.x2 == card_size.x - card_margin\n assert card_interior.y2 == card_size.y - card_margin\n assert card_interior.x2 - card_interior.x1 == card_size.x - card_margin * 2\n\n card_qr\t\t\t= card_interior.add_region_proportional(\n Image( 'card-qr', x1=1/2, y1=1/4, x2=1, y2=1 )\n ).square( maximum=1, justify='BR' )\n card_interior.add_region_proportional(\n Box( 'card-box-ul', x1=1/2, y1=1/4, x2=1, y2=1 )\n ).square( maximum=.5, justify='TL' )\n card_interior.add_region_proportional(\n Box( 'card-box-cm', x1=1/2, y1=1/4, x2=1, y2=1 )\n ).square( maximum=.5 )\n card_interior.add_region_proportional(\n Box( 'card-box-br', x1=1/2, y1=1/4, x2=1, y2=1 )\n ).square( maximum=.5, justify='BR' )\n\n #card_qr.x1\t\t\t= card_qr.x2 - 1.0\n #card_qr.y1\t\t\t= card_qr.y2 - 1.0\n #print( card_qr )\n assert card_qr.x1 == 2.25\n assert card_qr.y1 == 1.125\n\n elements\t\t\t= list( card.elements() )[1:]\n print( json.dumps( elements, indent=4 ))\n assert len( elements ) == 4\n assert elements[0]['type'] == 'I'\n\n card_top\t\t\t= card_interior.add_region_proportional(\n Region( 'card-top', x1=0, y1=0, x2=1, y2=1/3 )\n )\n card_top.add_region_proportional(\n Text( 'card-title', x1=0, y1=0, x2=1, y2=40/100 )\n )\n\n elements\t\t\t= list( card.elements() )[1:]\n print( json.dumps( elements, indent=4 ))\n assert elements[-1]['type'] == 'T'\n assert elements[-1]['font'] == 'helvetica'\n assert elements[-1]['size'] == approx( 14.4 )\n\n pdf\t\t\t\t= FPDF()\n pdf.add_page()\n\n tpl\t\t\t\t= FlexTemplate( pdf, list( card.elements() ) )\n tpl['card-qr']\t\t= qrcode.make( 'abc' ).get_image()\n tpl['card-title']\t\t= 'Abc'\n # Abc in upper-left\n tpl.render()\n\n tpl['card-qr']\t\t= qrcode.make( 'abc' ).get_image()\n tpl['card-title']\t\t= 'Xyz'\n # Xyz in lower-right\n tpl.render( offsetx = card_size.x * MM_IN, offsety = card_size.y * MM_IN )\n\n #pdf.output( \"test.pdf\" ) # To view results in test.pdf, 
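output_test.py above lays out its PDF through FlexTemplate regions, but the core step, dropping a qrcode image onto a page, needs much less machinery. A minimal sketch, assuming the fpdf2 package (whose image() accepts PIL images such as the one qrcode.make(...).get_image() returns):

import qrcode
from fpdf import FPDF

pdf = FPDF()
pdf.add_page()
img = qrcode.make('abc').get_image()    # PIL image, as in the test above
pdf.image(img, x=10, y=10, w=30, h=30)  # placed at (10, 10) mm, 30 mm square
pdf.output('qr.pdf')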
uncomment\n","repo_name":"pjkundert/python-slip39","sub_path":"slip39/output_test.py","file_name":"output_test.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"77"} +{"seq_id":"42944447901","text":"from adventure.lib.monster import Monster\nfrom adventure.items.key import Key\n\nclass Rat(Monster):\n def __init__(self):\n super().__init__()\n self.name = \"Rat\"\n self.description = \"That rat looks like it might be sick. I should be careful not to get bit!\"\n #Set the Rat's starting health to 2 so that the player only has to attack the Rat twice to kill it\n self.health = 2\n #Set the Rat's attack_value to 1 so that it takes the rat 10 strikes to kill the player\n self.attack_value = 1\n self.actions = {\"search\" : self.do_search_corpse}\n self.my_map_string = \"Rat\"\n\n def respond_to_hit(self, player):\n if self.health:\n print(\"- - - Keep striking it! - - -\")\n\n def do_search_corpse(self, verb, args, player):\n key = Key()\n key.move(self.environment)\n print(f\"As you search the {self.name} you hear a {key.name} hit the floor! You should 'Collect' it!\")\n self.environment.inventory.remove(self)\n self.environment = None\n key.introduce(player)\n return True\n\n def hit(self, attack_value):\n super().hit(attack_value)\n if not self.is_alive:\n self.my_map_string = \"XO-\"\n print(f\"Perhaps you can 'Search {self.name}'\")\n\n def introduce(self, player):\n if not super().introduce(player):\n return\n self.print_rat()\n\n def print_rat(self):\n print(\n \"\"\"\nYou stumble into a giant rat! Perhaps you could 'strike rat'?\n _ __,..---\"\"-._ ';-,\n , _/_),-\"` '-. `\\\\\\\\\n \\|.-\"` -_) '. ||\n /` a , \\ .'/\n '.___,__/ .-' \\_ _.-'.'\n |\\ \\ \\ /` _`\"\"\"\"\"\"`_.-'\n _/;--._, > | --.__/ `\"\"\"\"\"\"`\n (((-' __//`'-......-;\\ )\n (((-' __// '--. 
/\n (((-' __//\n (((-'\n \"\"\")","repo_name":"coder-crusade/adventure","sub_path":"adventure/monsters/rat.py","file_name":"rat.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"7756194756","text":"\"\"\"Reorganizing data\"\"\"\n\nimport pandas as pd\nimport glob\nimport datetime as dt\n\nfrom script.common.func import (check_none_sotnam, start_date)\nfrom script.etl.database import (\n conn_bd_oemz, load_inputs_all, load_nomenclature\n)\n\n\ndef prepare_csv():\n \"\"\"Prepare csv tables\"\"\"\n\n CONN = conn_bd_oemz()\n INPUTS_ALL = load_inputs_all(CONN)\n N_DAYS = 7\n NOM = load_nomenclature(CONN)\n DONE_TABLE = load_req_plus_inputs(N_DAYS, INPUTS_ALL, NOM)\n DETAIL_TABLE, SUMMARY_TABLE = reshape_summary_table(DONE_TABLE, N_DAYS, NOM)\n\n DETAIL_TABLE.to_csv(r\".\\tables\\detail.csv\", sep=';', encoding='ansi')\n SUMMARY_TABLE.to_csv(r\".\\tables\\summary.csv\", sep=';', encoding='ansi')\n\n\ndef reshape_inputs(inp1, nom1, date1, n_days1):\n \"\"\"Построение таблицы с поступлениями:\n - только металл\n - по заданному интервалу дат\n - итоговая гру\n :arg inp1 - поступления\n :arg nom1 - словарь номенклатуры из bd_oemz.bd3\n :arg date1 - нижняя дата интервала\n :arg n_days1 - для расчета верхней границы интервала (date1 + n_days1)\"\"\"\n\n inp1 = inp1[\n (inp1.date >= date1) &\n (inp1.date <= date1 + dt.timedelta(days=n_days1))\n ]\n inp1 = inp1.merge(\n nom1[['code', 'indicator']],\n how='left',\n left_on='nom_code',\n right_on='code'\n )\n inp1 = inp1[inp1.indicator == 'Металлопрокат'].drop(columns=['indicator', 'code'])\n return inp1\n\n\ndef load_req_plus_inputs(n_days1, inputs_, nom_):\n \"\"\"Загрузка потребностей из файлов типа ask_date.csv\n (то есть на каждую дату создания файла),\n мерж к них поступлений через код номекнлатуры.\n На выходе таблица потребностей и поступлений и дат, где даты создания файлов.\n Выход в формате DataFrame\n p.s. 
Inputs загружаются один раз из базы, так как занимают мало памяти.\n :arg n_days1 - для расчета верхней границы интервала (strt_date + n_days1)\n :arg inputs_ - table with inputs\n :arg nom_ - table with nomenclatures\"\"\"\n\n goal_path = r\"W:\\Analytics\\Илья\\Задание 14 Расчет потребности для МТО\\data\\*.csv\"\n output_table1 = pd.DataFrame()\n for i_path in glob.glob(goal_path): # проход по всех файлам\n \"\"\"Подготовка поотребностей\"\"\"\n path = i_path\n req = pd.read_csv(\n path, sep=';',\n parse_dates=['Дата запуска'],\n dayfirst=True, encoding='ansi',\n usecols=[\n 'Дата запуска', 'Код', 'Заказ обеспечен', 'Пометка удаления',\n 'Списание из Поступлений', 'Остаток дефицита'\n ]\n )\n req = req[\n (req['Заказ обеспечен'] == 0) &\n (req['Пометка удаления'] == 0)\n ] # оставляем строки без индикации обеспеченности и удаления\n req['all_req'] = (\n req['Остаток дефицита'] + req['Списание из Поступлений']\n ) # складываем остаток дефицита и поступления\n req = req[['Дата запуска', 'Код', 'all_req']]\n req = req.rename(\n columns={'Дата запуска': 'date', 'Код': 'nom_code', 'all_req': 'req'}\n )\n req = req[req['req'] != 0] # остаются строки != 0\n\n \"\"\"Выборка по нужным датам и мерж поступлений с потребностями\"\"\"\n strt_date = start_date(path)\n need_inputs = reshape_inputs(inputs_, nom_, strt_date, n_days1) # need_inputs - это поступления за нужный промежуток времени\n req = req[(req.date <= strt_date + dt.timedelta(days=n_days1))] # выбираем потребность для нужного промежутка ��ремени\n need_inputs = need_inputs[\n ['nom_code', 'amount']\n ].groupby(by=['nom_code']).sum().reset_index()\n req = req[['nom_code', 'req']].groupby(by=['nom_code']).sum().reset_index()\n req = req.merge(\n need_inputs, how='outer', on='nom_code'\n ).replace({None: 0}) # тут получаем сравнение потребности и поступлений\n req = req.rename(columns={'amount': 'inputs'})\n req['date_start'] = strt_date\n output_table1 = pd.concat([output_table1, req])\n return output_table1\n\n\ndef reshape_summary_table(table1, n_days1, nom_):\n \"\"\"Преборазовывает таблицу потребностей и поступлений из load_req_plus_inputs():\n - мерж с сортаментом\n - группировка по дате и сортаменту\n - расчет in_plan, out_plan, out_nom\n На выходе 2 таблрицы table1, summary_table:\n - table1 = таблица по сортаментам\n - summary_table = table1, схлопнутая по датам\n :arg table1 - таблица из load_req_plus_inputs(n_days1)\n :arg n_days1 - для расчета верхней границы интервала (strt_date + n_days1)\n :arg nom_ - table with nomenclatures\"\"\"\n\n nom_sortam = pd.read_excel(\n r\"W:\\Analytics\\Илья\\Задание 14 Расчет потребности для МТО\\dicts\\dict_nom.xlsx\",\n usecols=['Номенклатура', 'Сортамент']\n ).drop_duplicates()\n code_sortam = nom_.merge(nom_sortam, how='left', left_on='name', right_on='Номенклатура')\n code_sortam = code_sortam[['code', 'Сортамент']].rename(\n columns={'Сортамент': 'sortam', 'code': 'nom_code'}\n )\n table1 = table1.merge(code_sortam, how='left', on='nom_code') # добавляем сортамент\n\n check_none_sotnam(table1) # проверка отсутствия None в столбце с сортаментом\n\n table1 = table1.drop(columns=['nom_code'])\n table1 = table1.groupby(by=['date_start', 'sortam']).sum().reset_index()\n\n \"\"\"Рассчеты in_plan, out_plan, out_nom\"\"\"\n table1['in_plan'] = table1['inputs'].where(\n table1['req'] > table1['inputs'],\n table1['req']\n )\n table1['out_plan'] = 0\n table1['out_plan'] = table1['out_plan'].where(\n ~((table1['req'] != 0) & (table1['inputs'] > table1['req'])),\n table1['inputs'] - table1['req']\n 
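The comparison at the heart of load_req_plus_inputs above is an outer merge of demand against receipts keyed on nomenclature code, with the missing side treated as zero. The same step on toy frames (column names mirror the ones above):

import pandas as pd

req = pd.DataFrame({'nom_code': [1, 2], 'req': [10.0, 5.0]})
inputs = pd.DataFrame({'nom_code': [2, 3], 'amount': [4.0, 7.0]})

merged = (req.merge(inputs, how='outer', on='nom_code')
             .fillna(0)  # missing side counts as zero
             .rename(columns={'amount': 'inputs'}))
print(merged)  # nom_code 1 has no receipts, nom_code 3 has no demand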
)\n table1['out_nom'] = 0\n table1['out_nom'] = table1['out_nom'].where(\n ~((table1['req'] == 0) & (table1['inputs'] > table1['req'])),\n table1['inputs'] - table1['req']\n )\n table1['date_end'] = table1.date_start + dt.timedelta(days=n_days1)\n table1 = table1[[\n 'date_start', 'date_end', 'sortam', 'req',\n 'in_plan', 'out_plan', 'out_nom'\n ]]\n\n summary_table1 = table1.groupby(by='date_start').sum().reset_index()\n summary_table1['percent'] = summary_table1.in_plan / summary_table1.req * 100\n summary_table1['date_end'] = summary_table1.date_start + dt.timedelta(days=n_days1)\n summary_table1 = summary_table1[[\n 'date_start', 'date_end', 'req', 'in_plan',\n 'percent', 'out_plan', 'out_nom'\n ]]\n return table1, summary_table1\n","repo_name":"smolinilya01/procurement_plan_fact","sub_path":"script/etl/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7784,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"22050714877","text":"from typing import List\n\nimport torch\nfrom torch import Tensor\nfrom torchmetrics.audio import ScaleInvariantSignalDistortionRatio as SISDR\n\nfrom src.base.base_metric import BaseMetric\n\n\nclass SISDRMetric(BaseMetric):\n def __init__(self, device, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.device = device\n self.sisdr = SISDR().to(device)\n\n def __call__(self, pred, target, *args, **kwargs):\n valid_size = min(pred.size(-1), target.size(-1))\n sisdrs = [\n self.sisdr(\n pred[i, 0, :valid_size].to(self.device),\n target[i, 0, :valid_size].to(self.device),\n )\n for i in range(pred.size(0))\n if torch.isfinite(pred[i, 0]).all()\n ]\n return (sum(sisdrs) / len(sisdrs)).squeeze().detach().cpu().item()\n","repo_name":"LeoProko/ss_hw","sub_path":"src/metric/sisdr.py","file_name":"sisdr.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"313190599","text":"'''\n Example script to plot the\n galaxy componets using the FASTPT algorithm.\n\n Additionally the code also makes use of extending the power spectrum out\n to higher and lower k to alliviate edge effects.\n\n The parameters low_extrap and high_extrap control the extension of P_{lin}(k)\n out to lower and higher k values by extrapolating using the effective power-law\n index n_{eff}= dlogP/dlogk.\n'''\n\n\nimport numpy as np\n# example data\nd=np.loadtxt('P_bias_example.dat')\n#print('shape', d.shape)\n'''\n0: k [in h/Mpc]\n1: Plin (scales as D^2)\n2: P22 (scales as D^4)\n3: P13 (scales as D^4)\n4: b1 b2 (scales as D^4)\n5: b2^2 (scales as D^4)\n6: bs^2 (scales as D^4)\n7: b1 bv (no advection) (scales as D^2)\n8: b1 bv Ls (scales as D^2)\n9: b2 bv (scales as D^2)\n10: bs bv (scales as D^2)\n11: bv^2 (constant )\n'''\n\n# example use of FASTPTII\nk=d[:,0]; P_lin=d[:,1]\n\nfrom fastpt import FASTPT\nfrom time import time\nC_window=.65\nn_pad=1000\n\nt1=time()\n# initialize the FASTPT class\nnu=-2\nfastpt=FASTPT.FASTPT(k,-2,n_pad=n_pad)\n\nP_1loop=fastpt.one_loop(P_lin,C_window=C_window)\n_,Pd1d2, Pd2d2, Pd1s2, Pd2s2, Ps2s2, sig4 =fastpt.P_bias(P_lin,C_window=C_window)\nt2=time()\nprint('The time to make density-density type power spectra is ', t2-t1,' .')\n\n\n# plot the output\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter, FormatStrFormatter\ngs = gridspec.GridSpec(2,3, 
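The SISDRMetric wrapper above trims mismatched lengths and skips non-finite predictions before averaging per-sample scores. The underlying torchmetrics call on its own:

import torch
from torchmetrics.audio import ScaleInvariantSignalDistortionRatio

sisdr = ScaleInvariantSignalDistortionRatio()
target = torch.randn(8000)
pred = target + 0.1 * torch.randn(8000)  # mildly corrupted copy of the target
print(sisdr(pred, target))               # high SI-SDR (in dB) means a good estimate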
height_ratios=[2,1.25])\n\n\nfig=plt.figure(figsize=(16,10))\n\n#x1=10**(-2.5)\n#x2=50\nx1=10**(-5)\nx2=100\nx1=k[0]\nx2=k[-1]\nax1=fig.add_subplot(gs[0,0])\n#ax1.set_ylim(1e-2,1e3)\nax1.set_xlim(x1,x2)\nax1.set_xscale('log')\nax1.set_yscale('log')\nax1.set_ylabel(r'$P(k)$ [Mpc/$h$]$^3$', size=20)\nax1.tick_params(axis='both', which='major', labelsize=20)\nax1.tick_params(axis='both', width=2, length=10)\nax1.tick_params(axis='both', which='minor', width=1, length=5)\n#ax1.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))\nax1.xaxis.labelpad = 20\nax1.set_xticklabels([])\n\nax1.plot(k,P_1loop, lw=2,color='black', label=r'$P_{22}(k) + P_{13}(k)$, FAST-PT ' )\nax1.plot(k,-P_1loop, '--',lw=2, color='black', alpha=.5 )\nplt.legend(loc=3)\nplt.grid()\n\nax2=fig.add_subplot(gs[1,0])\nax2.set_xscale('log')\nax2.set_xlabel(r'$k$ [$h$/Mpc]', size=20)\nax2.set_ylabel('ratio', size=20)\nax2.set_ylim(.99,1.01)\nax2.set_xlim(x1,x2)\nax2.tick_params(axis='both', which='major', labelsize=20)\nax2.tick_params(axis='both', width=2, length=10)\nax2.tick_params(axis='both', which='minor', width=1, length=5)\n#ax2.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))\nax2.xaxis.labelpad = 20\n\n\nax2.plot(d[:,0],P_1loop/(d[:,2]+d[:,3]),lw=2, color='black', alpha=.5)\nplt.grid()\n\n##########################################################################\nax1=fig.add_subplot(gs[0,1])\n#ax1.set_ylim(1e-2,1e3)\nax1.set_xlim(x1,x2)\nax1.set_xscale('log')\nax1.set_yscale('log')\nax1.set_ylabel(r'$P(k)$ [Mpc/$h$]$^3$', size=20)\nax1.tick_params(axis='both', which='major', labelsize=20)\nax1.tick_params(axis='both', width=2, length=10)\nax1.tick_params(axis='both', which='minor', width=1, length=5)\n#ax1.xaxis.set_major_formatter(FormatStrFormatter('%2.4f'))\nax1.xaxis.labelpad = 20\nax1.set_xticklabels([])\n\nax1.plot(k,Pd1d2, lw=2, color='black',label=r'$b_1 b_2$, FAST-PT ' )\nax1.plot(k,Pd2d2, '--', lw=2,color='red', label=r'$b_2^2$, FAST-PT ' )\nplt.legend(loc=3)\n\nplt.grid()\n\nax2=fig.add_subplot(gs[1,1])\nax2.set_xscale('log')\nax2.set_xlabel(r'$k$ [$h$/Mpc]', size=20)\nax2.set_ylabel('ratio', size=20)\nax2.set_ylim(.99,1.01)\nax2.set_xlim(x1,x2)\nax2.tick_params(axis='both', which='major', labelsize=20)\nax2.tick_params(axis='both', width=2, length=10)\nax2.tick_params(axis='both', which='minor', width=1, length=5)\n#ax2.xaxis.set_major_formatter(FormatStrFormatter('%2.4f'))\nax2.xaxis.labelpad = 20\n\n\nax2.plot(k,Pd1d2/d[:,4], lw=2,color='black', alpha=.5, label=r'$b_1 b_2$, FAST-PT ' )\nax2.plot(k,Pd2d2/d[:,5]/4., '--', lw=2,color='red', label=r'$b_2^2$, FAST-PT ' )\nplt.grid()\n\n##########################################################################\nax1=fig.add_subplot(gs[0,2])\n#ax1.set_ylim(1e-2,1e3)\nax1.set_xlim(x1,x2)\nax1.set_xscale('log')\nax1.set_yscale('log')\nax1.set_ylabel(r'$P(k)$ [Mpc/$h$]$^3$', size=20)\nax1.tick_params(axis='both', which='major', labelsize=20)\nax1.tick_params(axis='both', width=2, length=10)\nax1.tick_params(axis='both', which='minor', width=1, length=5)\n#ax1.xaxis.set_major_formatter(FormatStrFormatter('%2.4f'))\nax1.xaxis.labelpad = 20\nax1.set_xticklabels([])\n\nax1.plot(k,-Pd1s2, lw=2, color='black', label=r'$-b_1 b_s$, FAST-PT ' )\nax1.plot(k,Pd2s2, '--', lw=2,color='red', label=r'$b_2 b_s$, FAST-PT ' )\nax1.plot(k,Ps2s2, '-.', lw=2,color='blue', label=r'$b_s^2$, FAST-PT ' )\nplt.legend(loc=3)\n\nplt.grid()\n\nax2=fig.add_subplot(gs[1,2])\nax2.set_xscale('log')\nax2.set_xlabel(r'$k$ [$h$/Mpc]', size=20)\nax2.set_ylabel('ratio', 
size=20)\nax2.set_ylim(.99,1.01)\nax2.set_xlim(x1,x2)\nax2.tick_params(axis='both', which='major', labelsize=20)\nax2.tick_params(axis='both', width=2, length=10)\nax2.tick_params(axis='both', which='minor', width=1, length=5)\n#ax2.xaxis.set_major_formatter(FormatStrFormatter('%2.2f'))\nax2.xaxis.labelpad = 20\n\nax2.plot(k,Pd1s2/d[:,12], lw=2, color='black', alpha=.5, label=r'$-b_1 b_s$, FAST-PT ' )\nax2.plot(k,Pd2s2/d[:,13]/2., '--', lw=2,color='red', label=r'$b_2 b_s$, FAST-PT ' )\nax2.plot(k,Ps2s2/d[:,6]/4., '-.', lw=2,color='blue', label=r'$b_s^2$, FAST-PT ' )\nplt.grid()\n\nplt.tight_layout()\nplt.show()\nfig.savefig('bias_example_plot.pdf')\n","repo_name":"JoeMcEwen/FAST-PT","sub_path":"examples/galaxy_bias_perturbation_example.py","file_name":"galaxy_bias_perturbation_example.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"} +{"seq_id":"11767519954","text":"import re\n\nn = input()\n\n# codeline 6 ~ 15 ==> 앞의 0000이 있는 수를 제거 하는 과정\n# -----------------------------------------------------\noperatorlist = []\n # split()을 할것이기 때문에 연산자를 미리 저장해둔다.\nfor i in range(len(n)):\n if n[i] == '+' or n[i] == '-':\n operatorlist.append(n[i]) \n # re module을 이용해 +,-을 제외한 수를 tmp에 list로 저장시켜둔다.\ntmp = re.split(r'[+-]',n)\n\n # lstrip으로 앞의 0이 있는 수를 제거 한다.\nfor i in range(len(tmp)):\n tmp[i] = tmp[i].lstrip('0')\n# --------------------------------------------\n# 이제는 앞에 0이 있는 수를 제거 했으니 편하게 계산하는 작업.\nn=''\nfor i,x in enumerate(tmp):\n n += ''.join(x)\n if i == len(tmp)-1:\n break\n n += ''.join(operatorlist[i])\n # -를 -(로 변경해주고 pcheck이란 bool값을 이용해 )을 닫을 타이밍을 체크함\n # 결론적으로, 다음 -연산자 앞에 )를 닫으면 된다.\nn= n.replace('-','-(')\nanswer =''\npcheck = False\n\nfor i in range(len(n)):\n if n[i] == '(':\n pcheck = True\n if pcheck and n[i] == '-':\n answer +=''.join(')-')\n pcheck = False\n continue\n if pcheck and i == len(n)-1:\n answer+= ''.join(n[i]+')')\n break\n answer += ''.join(n[i])\n\nprint(answer)\n\n\n\"\"\"\n 0~9,+,- ()을 이용해 최소값으로 만들어라.\n\n 마지막 조건 000009-0000009를 제거하는게 젤 어려웠다.\n 조금 돌아가는 코드 인것 같은데.. 
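The closing docstring above notes the difficulty of leading-zero terms, but the minimization itself has a clean closed form: every term after the first '-' can be parenthesized and subtracted, so split on '-', sum each '+' group, and subtract all groups after the first. int() also swallows leading zeros, which removes the need for lstrip tricks:

def minimize(expr):
    # Everything after the first '-' is effectively subtracted.
    groups = [sum(int(t) for t in g.split('+')) for g in expr.split('-')]
    return groups[0] - sum(groups[1:])

assert minimize('55-50+40') == -35     # 55 - (50 + 40)
assert minimize('10+20-30+40') == -40  # (10 + 20) - (30 + 40)
assert minimize('00009-0000009') == 0  # int() ignores leading zeros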
좋은 방법이 지금은 생각나지 않는다.\n\n\"\"\"","repo_name":"Jinstarship-code/coding-test","sub_path":"greedy/Sliver/BOJ_1541.py","file_name":"BOJ_1541.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"35483426243","text":"## Definition of Knight's Tour search problem\n## For use with queue_search.py\n\nfrom __future__ import print_function\n\nfrom copy import deepcopy\n\n# Representation of a state:\n# (move_number, (current_x_pos, current_y_pos), full_board_state)\n# So on a small 3x3 board after 3 moves we might have a state such as:\n#\n# ( 3, (2,0), [[1,0,0],\n# [0,0,2],\n# [3,0,0]] )\n\n\ndef knight_get_initial_state(x,y):\n return ( 0, 0, matrix_of_zeros(y,x) )\n\ndef matrix_of_zeros(X,Y):\n return [ [0 for x in range(X)] for y in range(Y)] # Pythonic or what?\n\n\ndef knight_possible_actions( state ):\n if state[0] == 0:\n return knight_initial_moves()\n return knight_following_moves(state)\n\ndef knight_initial_moves():\n moves = []\n for i in range(BOARD_X):\n for j in range(BOARD_Y):\n moves = moves + [[i,j]]\n return moves\n\ndef square_is_empty(i,j, state):\n if state[2][i][j] == 0:\n return True\n return False\n\nknights_moves = ( (1,2), (2,1), (-1,2), (2,-1),\n (1,-2), (2,-1), (-1,-2), (-2,-1) )\n\ndef knight_following_moves( state ):\n kx = state[1][0]\n ky = state[1][1]\n moves = []\n for move in knights_moves:\n newx = kx + move[0] ## target x coord\n newy = ky + move[1] ## target y coord\n\n ## If target square is on board and empty\n ## add it to the list of moves \n if newx in range(BOARD_X) and newy in range(BOARD_Y):\n if state[2][newx][newy] == 0:\n moves = moves + [move]\n return moves\n\ndef knight_successor_state( action, state ):\n if state[0] == 0:\n newstate = knight_initial_successor( action )\n return newstate\n board = deepcopy(state[2])\n xpos = state[1][0] + action[0]\n ypos = state[1][1] + action[1]\n movenum = state[0] + 1\n board[xpos][ypos] = movenum\n return (movenum, (xpos,ypos), board) \n\n\ndef knight_initial_successor( action ):\n board = deepcopy(knight_initial_state[2])\n board[action[0]][action[1]] = 1\n return( 1, action, board )\n\n\ndef knight_goal_state( state ):\n if state[0] == BOARD_X * BOARD_Y:\n print( \"\\nGOAL STATE:\" )\n print_board_state( state )\n return True\n return False\n\n\ndef print_board_state( state ):\n board = state[2]\n for row in board:\n for square in row:\n print( \" %2i\" % square, end = '' )\n print()\n\n\n\ndef knight_print_problem_info():\n print( \"The Knight's Tour (\", BOARD_X, \"x\", BOARD_Y, \"board)\" )\n\n## Return a problem spec tuple for a given board size\ndef get_knights_tour_problem(x, y):\n global BOARD_X, BOARD_Y, knight_initial_state\n BOARD_X = x\n BOARD_Y = y\n knight_initial_state = knight_get_initial_state(x,y)\n return ( None,\n knight_print_problem_info,\n knight_initial_state,\n knight_possible_actions,\n knight_successor_state,\n knight_goal_state\n )\n \n \n\n","repo_name":"Emharsh/Projects","sub_path":"Missionaries and Cannibals Problem -AI/knights_tour.py","file_name":"knights_tour.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40250353607","text":"\nimport pyrealsense2 as rs\nimport numpy as np\nfrom ..utils.post_process import *\nfrom .helper import *\nfrom ..detect.detect import *\nfrom PyQt5.QtCore import (\n QThread,\n Qt,\n pyqtSignal,\n pyqtSlot,\n QRunnable,\n QThreadPool,\n 
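One detail worth flagging in the knight's-tour record above: its knights_moves tuple lists (2,-1) twice and omits (-2,1), so one legal move is never generated. The full set of eight falls out of a product filter:

from itertools import product

KNIGHT_MOVES = [(dx, dy)
                for dx, dy in product((-2, -1, 1, 2), repeat=2)
                if abs(dx) != abs(dy)]  # keep only the (1,2)-shaped offsets

assert len(KNIGHT_MOVES) == 8 and (-2, 1) in KNIGHT_MOVES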
QObject,\n QTimer,\n)\nimport time\n\n\nclass SocialDistancing(QThread):\n def __init__(self, signals,cap,imageQueue,lock,Detect):\n super(SocialDistancing, self).__init__()\n self.align = rs.align(rs.stream.color)\n self.queue = imageQueue\n self.signals = signals\n self.camera = cap\n self.lock = lock\n self.detector = Detect\n self.align = rs.align(rs.stream.color)\n self.minDistance = 1\n self.signals.min_distance.connect(self.updateSignal)\n self.signals.tab_selection.connect(self.updateTab)\n self.tab = 0\n self.frame = signals.distance_frame\n \n def updateSignal(self, value):\n self.minDistance = value\n \n def updateTab(self,value):\n \n self.tab = value\n \n \n @pyqtSlot()\n def run(self):\n \n\n \n \n \n\n self.threadActive = True\n\n\n\n \n\n print(\"starting stream\")\n while self.threadActive:\n if self.tab ==0:\n self.setPriority(QThread.TimeCriticalPriority)\n self.lock.lockForRead()\n try:\n \n frames = self.camera.getFrame()\n self.lock.unlock()\n # time.sleep(0.5)\n \n color_frame = frames.get_color_frame()\n depth_frame = frames.get_depth_frame()\n\n if not color_frame or not depth_frame:\n continue\n\n depth_frame = alignImage(frames,self.align)\n\n color_image = color_frame.get_data()\n color_image = np.asanyarray(color_image)\n \n \n\n predictions = self.detector.detectPeople(color_image)\n\n numberOfPeople = 0\n\n numberOfPeople = len(predictions)\n\n pred_bbox = getVectorsAndBbox(predictions, depth_frame)\n\n self.signals.people.emit(numberOfPeople)\n\n if pred_bbox:\n\n\n color_image, violation = drawBox(\n color_image, pred_bbox, self.minDistance\n )\n\n self.signals.violation.emit(violation)\n\n\n\n\n\n\n\n\n\n #self.queue.put(color_image)\n p = rgbtoQimage(color_image)\n \n self.frame.emit(p)\n \n \n\n\n \n except Exception as e:\n\n print(\"Error is :\", str(e))\n #self.quit()\n \n \n \n \n\n \n \n #self.camera.stop()\n #self.quit()\n\n def stop(self):\n self.threadActive = False\n","repo_name":"Face-Recognition-Gate-Control/socialDistancingApp","sub_path":"src/threads/socialDistancing.py","file_name":"socialDistancing.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"21804716954","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Access an object's name as a property\n\nAutoname is a data-descriptor, which automatically looks up the\nname under which the object on which the descriptor is accessed\nis known by.\n\nImport the descriptor using ``from mtoolbox.autoname import Autoname``.\n\nExample:\n\n>>> class Object(object):\n... name = Autoname()\n>>> obj1 = Object()\n>>> obj1.name\n'obj1'\n>>> obj2 = Object()\n>>> obj2.name\n'obj2'\n\nBy default Autoname will return the outer-most name that was defined\nfor the object:\n\n>>> class Object(object):\n... name = Autoname()\n>>> def func(anobject):\n... return anobject.name\n>>> o = Object()\n>>> func(o)\n'o'\n\nYou can change this behaviour by using the 'inner' keyword:\n\n>>> class Object(object):\n... name = Autoname(inner=True)\n>>> o = Object()\n>>> def func(anobject):\n... return anobject.name\n>>> func(o)\n'anobject'\n\nNote:\n Please be aware, that getting the inner-most name, is not what you\n want in most cases:\n\n >>> class Object(object):\n ... name = Autoname(inner=True)\n ... def printname(self):\n ... 
print(self.name)\n >>> o = Object()\n >>> o.printname()\n self\n\n When in automatic mode (see the class documentation below) the\n descriptor will always return a name, that is in some callframe\n dictionary. If you delete a name, it will use another one, that\n is still in use:\n\n >>> class Object(object):\n ... name = Autoname()\n >>> o = Object()\n >>> o.name\n 'o'\n >>> g = o\n >>> del o\n >>> g.name\n 'g'\n\n This can be helped a bit by using the 'bind' keyword argument and\n calling .name with the name that should be used first:\n\n >>> class Object(object):\n ... name = Autoname(bind=True)\n >>> o = Object()\n >>> o.name\n 'o'\n >>> g = o\n >>> del o\n >>> g.name\n 'o'\n\nWarning:\n Defining multiple names for an object in the same call frame (which is\n easily said the same level of indention in your program) will\n cause undetermined behaviour, depending on the Python interpreter:\n\n >>> class Object(object):\n ... name = Autoname()\n >>> o = Object()\n >>> g = o\n >>> o.name in ['o', 'g']\n True\n\"\"\"\n\nimport doctest\nimport inspect\n\n\nclass Autoname(object):\n \"\"\"Create a new Autoname descriptor\n\n Args:\n initval (str, bool, None): The initial name\n inner (bool): Return the inner-most name of the object (or not)\n bind (bool): Bind the descriptor to the first name it returns\n\n Returns:\n Autoname: An Autoname instance\n \"\"\"\n\n def __init__(self, initval=True, inner=False, bind=False):\n self.val = None\n self.inner = inner\n self.bind = bind\n self.__set__(None, initval)\n\n def __get__(self, theobject, objtype):\n \"\"\"Return the name of theobject or None\n\n Returns:\n str or None: the name of the object\n\n Usage:\n >>> class Object(object):\n ... name = Autoname()\n >>> obj = Object()\n >>> obj.name\n 'obj'\n >>> obj.name = 'another name'\n >>> obj.name\n 'another name'\n \"\"\"\n if isinstance(self.val, str):\n return self.val\n elif self.val is False or self.val is None:\n return None\n else:\n # If we really didn't find a name, we return None\n thename = None\n\n # There is at least one frame in the callstack, in which\n # the calling object is a local variable, so we climb up\n # the callstack, to find the name of the object.\n for count, frametuple in enumerate(inspect.stack()):\n # skip the first frame - this is our __get__\n if count == 0:\n continue\n\n for name, obj in frametuple[0].f_locals.items():\n # found a name, but keep searching in order to get\n # the outer-most name unless inner == True\n if obj is theobject:\n thename = name\n if self.inner:\n self.__bind_if_wanted(thename)\n return thename\n\n self.__bind_if_wanted(thename)\n return thename\n\n def __bind_if_wanted(self, name):\n if self.bind:\n self.__set__(None, name)\n\n def __set__(self, theobject, val):\n \"\"\"Set the name of the theobject\n\n Args:\n theobject (object): The object to which's class\n the descriptor is attached to\n\n val (str, bool or None): Sets the name to depending on the type:\n str sets the name to this str.\n False or None sets the name to None.\n True sets the name to automatically lookup.\n\n Returns:\n None\n\n Raises:\n TypeError if type(val) is invalid\n\n Usage:\n >>> class Object(object):\n ... 
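The docstrings above describe Autoname's trick: walk the caller's frames with inspect and look the instance up in each frame's f_locals. The essence of that lookup outside the descriptor machinery; names_bound_to is an illustrative name:

import inspect

def names_bound_to(obj):
    # Names the calling frame binds to obj.
    caller = inspect.currentframe().f_back
    return [name for name, value in caller.f_locals.items() if value is obj]

thing = object()
print(names_bound_to(thing))  # ['thing']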
name = Autoname()\n >>> o = Object()\n >>> o.name = 'k'\n >>> o.name\n 'k'\n >>> o.name = True\n >>> o.name\n 'o'\n >>> o.name = False\n >>> str(o.name)\n 'None'\n >>> o.name = 4\n Traceback (most recent call last):\n ...\n TypeError: Autoname must be set to str, bool, NoneType\n \"\"\"\n types = (str, bool, type(None))\n\n if not isinstance(val, types):\n raise TypeError(\"Autoname must be set to %s\" % \", \".join(\n [t.__name__ for t in types]))\n\n self.val = val\n\nif __name__ == '__main__':\n doctest.testmod()\n","repo_name":"messersm/mtoolbox","sub_path":"mtoolbox/autoname.py","file_name":"autoname.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33291563139","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Apr 06 10:00 2018\n@author(s): Sebastian Multsch\n\nThe script is based on a tutorial:\nhttp://fb09-pasig.umwelt.uni-giessen.de/cmf/wiki/semi_distributed#\n\nThe setup of the tutorial has been modified in order to simulate hydrological\nfluxed of different landuse types (grass, crop, wood, orchards, urban) with a \nlumped molling approach.\n\n\"\"\"\n\ndef stats_calc_stats(dat_observed,dat_simulated):\n #clc r2\n r_squared = np.corrcoef(dat_observed,dat_simulated)[0][1]**2\n\n #calc NSE\n def calc_NSE(obs,sim):\n avg_obs = np.mean(obs)\n return 1 - (sum((obs-sim)**2)/sum((obs-avg_obs)**2))\n NSE = calc_NSE(dat_observed,dat_simulated)\n #plot data\n return r_squared,NSE\n\ndef plot_flow(fname,model,sim,posterior_threshold=None):\n \n #preapre data\n stats = pd.DataFrame([(a,b,c) for a,b,c in zip(pd.date_range(model.begin,model.end)[1:],sim,model.evaluation()[1:])],columns=[\"Date\",\"Sim\",\"Obs\"])\n r_squared,NSE = stats_calc_stats(stats[~np.isnan(stats[\"Obs\"])][\"Obs\"],stats[~np.isnan(stats[\"Obs\"])][\"Sim\"])\n stats.set_index(\"Date\",inplace=True)\n\n #make plot of river segemnts\n fig = plt.figure(figsize=(10,7))\n \n #plot rainfall\n ax1 = fig.add_axes([0.1,0.71,0.8,0.2]) # x,y, lenght, height\n ax1.bar(stats.index, model.data.P[model.begin+model.data.step:model.end+model.data.step],align='center',color=\"k\",width=.5)\n ax1.invert_yaxis()\n ax1.xaxis.tick_top()\n ax1.xaxis.set_ticks_position('both') # THIS IS THE ONLY CHANGE\n ax1.xaxis_date()\n ax1.grid(True)\n ax1.spines['bottom'].set_color('none')\n ax1.xaxis.set_ticks_position('top')\n ax1.set_ylabel(\"Rain [mm day$^{-1}$]\")\n ax1.set_xlim(pd.Timestamp(model.begin),pd.Timestamp(model.end)) \n ax1.yaxis.tick_right()\n ax1.yaxis.set_label_position(\"right\")\n# ax1.xaxis.set_major_locator(mdates.MonthLocator())\n# ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n \n #plot stream flow\n ax2 = fig.add_axes([0.1,0.2,0.8,0.5]) # x,y, lenght, height\n ax2.plot(stats.index,stats[\"Sim\"],color=\"b\",label=\"Simulated\",linewidth=.7) \n ax2.plot(stats.index,stats[\"Obs\"],color=\"r\",label=\"Observed\",linewidth=.7) \n ax2.set_ylim(0,.5)\n ax2.set_ylabel(\"Flow [m$^3$ sec$^{-1}$]\")\n ax2.grid(True)\n ax2.spines['top'].set_color('none')\n ax2.set_xlim(pd.Timestamp(model.begin),pd.Timestamp(model.end)) \n ax2.xaxis.set_ticks_position('bottom')\n# ax2.xaxis.set_major_locator(mdates.MonthLocator())\n# ax2.xaxis.set_major_formatter(mdates.DateFormatter('%b'))\n if posterior_threshold:\n #get posteritor distribution\n r = pd.DataFrame([list(i) for i in res[1:]],columns=[i for i in res.dtype.names]) #TODO: skip first run ... error spotpy?\n #select runs with a NSE higher than ..., i.e. 
posterior distribution\n if len(r[r[\"like1\"]>posterior_threshold])>0:\n r = r[r[\"like1\"]>posterior_threshold]\n #get minimum and maximum vale\n col_names=[i for i in res.dtype.names if i.find(\"simulation\")>-1]\n postdis_sim_min = np.array([min([row[col] for r,row in r.iterrows()]) for col in col_names])\n postdis_sim_max = np.array([max([row[col] for r,row in r.iterrows()]) for col in col_names])\n #make plot\n ax2.fill_between(stats.index, postdis_sim_min, postdis_sim_max, where=postdis_sim_max >= postdis_sim_min, facecolor='b', interpolate=True,alpha=.25)\n \n #plot legend\n ax3 = fig.add_axes([0.1,0.1,0.8,0.1])# x,y, lenght, height\n legend_obs = mlines.Line2D([],[],color=\"red\",label=\"Observed\")\n legend_sim = mlines.Line2D([],[],color=\"blue\",label = \"Best simulation\")\n legend_rain = mlines.Line2D([],[],color=\"k\",label = \"Rainfall\")\n handels = [legend_sim,legend_obs,legend_rain]\n if posterior_threshold:\n legend_sim_post = mlines.Line2D([],[],color=\"blue\",label = \"Posterior dist.\",alpha=.25,linewidth=4)\n handels.append(legend_sim_post)\n ax3.legend(handles=handels, ncol=3,bbox_to_anchor=(0.00, 0.5, 1., .102),fontsize=10.,frameon=True)\n ax3.axis(\"off\")\n ax3.text(0.2, 0.05,\"Mielen boven Aalst, 2007\"+ \"\\n\"+ \"r$^2$: \"+\"%.2f NSE: %.2f\"%(r_squared,NSE),\n verticalalignment='bottom', horizontalalignment='left',\n transform=ax3.transAxes,\n color='k', fontsize=9, \n bbox=dict(facecolor='0.7', edgecolor='None', boxstyle='round,pad=.5',alpha=.5))\n\n fig.autofmt_xdate() \n fig.savefig(fname,dpi=300)\n \n \nimport datetime\nimport cmf\nimport spotpy\n#from spotpy.parameter import Uniform\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\nimport matplotlib.lines as mlines\nimport os\n\nclass CatchmetnModel:\n def __init__(self,fpath,start,end):\n self.fpath = fpath\n self.start = start\n self.end = end\n\nclass SpotpySetup(object):\n def __init__(self,fpath,start,end):\n self.fpath = fpath\n self.start = start\n self.end = end\n self.params = [spotpy.parameter.Normal('Ksat',0.3,0.1,0.02,0.2),\n spotpy.parameter.Normal('Phi',1.2,0.035,0.01,1.22),\n spotpy.parameter.Normal('alpha',1,0.3,0.1,2.0),\n spotpy.parameter.Normal('n',.55,0.04,0.02,0.6),\n spotpy.parameter.Normal('m',.55,0.04,0.02,0.6),\n ]\n \n def parameters(self):\n return spotpy.parameter.generate(self.params)\n\n def simulation(self,vector):\n simulations= self.cmfmodel._run(alpha=vector[0],n=vector[1],ksat=vector[2],porosity=vector[3])\n return simulations\n\n def evaluation(self,evaldates=False):\n if evaldates:\n return self.cmfmodel.eval_dates\n else:\n return self.cmfmodel.observations\n\n def objectivefunction(self,simulation,evaluation):\n objectivefunction= -spotpy.objectivefunctions.rmse(evaluation,simulation)\n return objectivefunction\n\n\n\n\n\n\nif __name__ == '__main__':\n # Get sampler\n from spotpy.algorithms import lhs as Sampler_lhs\n\n # Check if we are running on a supercomputer or local\n parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'\n\n# parallel = 'mpc'\n\n # Run the models\n runs = 2\n\n #create cmf\n model = SpotpySetup(begin=datetime.datetime(2007,1,1),end=datetime.datetime(2007,12,31))\n\n # Create the sampler\n sampler = Sampler_lhs(model, parallel=parallel, dbname=model.dbname,\n dbformat='csv', save_sim=True)\n #make analysis\n sampler.sample(runs)\n\n #get results\n res = sampler.getdata()\n \n \n \n \n \n \n \n \n \n \n# #get best model run \n# best_run = res[np.argmax(res[\"like1\"])]#sampler.status.bestrep\n# 
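# A small self-contained sketch (added for illustration) of the uncertainty
# band drawn in plot_flow above: plot one run as a line and shade the min/max
# envelope of the remaining runs with fill_between. All data here is synthetic.
import numpy as np
import matplotlib.pyplot as plt

t = np.arange(100)
runs = np.sin(t / 10.0) + 0.2 * np.random.randn(20, 100)  # 20 fake model runs
lo, hi = runs.min(axis=0), runs.max(axis=0)

fig, ax = plt.subplots()
ax.plot(t, runs[0], color="b", linewidth=0.7, label="best simulation")
ax.fill_between(t, lo, hi, where=hi >= lo, facecolor="b", alpha=0.25,
                interpolate=True, label="posterior envelope")
ax.legend()
fig.savefig("envelope.png", dpi=150)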
sim=list(best_run)[[i for i in res.dtype.names].index(\"simulation_0\"):-1]\n# \n# #make plot\n# plot_flow(\"melsterbeek_miele_calibration.png\",model,sim)#,posterior_threshold=0.35) #posterior_threshold: minimum NSE to select posterior runs\n\n# #make validation for each posterior dataset\n# posterior_threshold=.3\n# res_posterior = pd.DataFrame([list(i) for i in res[1:]],columns=[i for i in res.dtype.names])\n# if len(res_posterior[res_posterior[\"like1\"]>posterior_threshold])>0:\n# res_posterior = res_posterior[res_posterior[\"like1\"]>posterior_threshold]\n#\n# for rowid,bestrun_pars_vector in res_posterior.iterrows():\n# #create new model for validation\n# model_validation = LumpedModel(begin=datetime.datetime(2012,1,1),end=datetime.datetime(2016,12,31))\n# \n# # Get the array of parameter realizations\n# params = spotpy.parameter.get_parameters_array(model)\n# \n# # Create the namedtuple from the parameter names\n# partype = spotpy.parameter.get_namedtuple_from_paramnames(model, params['name'])\n# par = partype(bestrun_pars_vector[\"pargrass_V0L1\"],\n# grass_V0L2=bestrun_pars_vector[\"pargrass_V0L2\"],\n# grass_fETV0=bestrun_pars_vector[\"pargrass_fETV0\"],\n# grass_fETV1=bestrun_pars_vector[\"pargrass_fETV1\"],\n# grass_trL1L2=bestrun_pars_vector[\"pargrass_trL1L2\"],\n# grass_trL1out=bestrun_pars_vector[\"pargrass_trL1out\"],\n# grass_trL2out=bestrun_pars_vector[\"pargrass_trL2out\"])\n# \n# #run model with \n# validation_res_q = model_validation.simulation(par=par)\n# \n# #plot validation\n# plot_flow(\"melsterbeek_miele_validation_\"+str(rowid)+\".png\",model_validation,validation_res_q)\n# \n# \n\n# \n# ","repo_name":"xlandscape/CmfContinuous-Component","sub_path":"module/bin/LandscapeModel/utils/todo/spotpy.py","file_name":"spotpy.py","file_ext":"py","file_size_in_byte":8681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"13551393523","text":"from bst import Node\r\nfrom mylist import *\r\n\r\n\r\nclass PointerList(MyList):\r\n\r\n def __init__(self, size: int, value=None) -> None:\r\n self.head = prev = None\r\n self.size = size\r\n\r\n for _ in range(size):\r\n node = Node(value)\r\n if prev == None:\r\n prev = node\r\n self.head = node\r\n else:\r\n prev.left = node\r\n prev = node\r\n\r\n def __len__(self) -> int:\r\n return self.size\r\n\r\n def __getitem__(self, i: int):\r\n assert (\r\n 0 <= i < len(self)\r\n ), f\"Getting invalid list index {i} from list of size {len(self)}\"\r\n current = self.head\r\n for _ in range(i):\r\n current = current.left\r\n return current.data\r\n\r\n def __setitem__(self, i: int, value) -> None:\r\n assert (\r\n 0 <= i < len(self)\r\n ), f\"Getting invalid list index {i} from list of size {len(self)}\"\r\n current = self.head\r\n for _ in range(i):\r\n current = current.left\r\n current.data = value\r\n\r\n def __iter__(self) -> \"MyList\":\r\n return super().__iter__()\r\n\r\n def __next__(self):\r\n return super().__next__()\r\n\r\n def get(self, index: int):\r\n return self.__getitem__(index)\r\n\r\n def set(self, index: int, value: (int, int, int)) -> None:\r\n self.__setitem__(index, value)\r\n","repo_name":"Qazalbash/GradVault","sub_path":"data-structures-II/homeworks/2/src/pointerlist.py","file_name":"pointerlist.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"77"} +{"seq_id":"12020519947","text":"# Reversal Algorithm for Array Rotations from a given point \n\n# Examples: \n# 
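# The goodness-of-fit measures computed by stats_calc_stats above, restated
# as a standalone sketch (added for illustration): R^2 from the correlation
# coefficient, and the Nash-Sutcliffe efficiency
#   NSE = 1 - sum((obs - sim)^2) / sum((obs - mean(obs))^2),
# which is 1 for a perfect fit and drops to 0 when the model is no better
# than predicting the mean of the observations.
import numpy as np

def r_squared(obs, sim):
    return np.corrcoef(obs, sim)[0][1] ** 2

def nse(obs, sim):
    obs, sim = np.asarray(obs, float), np.asarray(sim, float)
    return 1.0 - np.sum((obs - sim) ** 2) / np.sum((obs - obs.mean()) ** 2)

obs = np.array([1.0, 2.0, 3.0, 4.0])
sim = np.array([1.1, 1.9, 3.2, 3.8])
print(r_squared(obs, sim), nse(obs, sim))  # both close to 1 for a good fit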
Input: arr[] = {1, 2, 3, 4, 5, 6, 7}, d = 2\n# Output: 3, 4, 5, 6, 7, 1, 2\n# Explanation: If the array is rotated by 1 position to the left, \n# it becomes {2, 3, 4, 5, 6, 7, 1}.\n# When it is rotated further by 1 position,\n# it becomes: {3, 4, 5, 6, 7, 1, 2}\n\n# Input: arr[] = {1, 6, 7, 8}, d = 3\n# Output: 8, 1, 6, 7\n\nclass ReversalAlgorithm:\n def rotateArray (self, arr, start, end):\n while (start < end):\n temp = arr[start]\n arr[start] = arr[end]\n arr[end] = temp\n start += 1\n end -= 1\n\n def leftReversal (self, arr, key):\n if key == 0:\n return\n n = len (arr)\n key %= n\n self.rotateArray (arr, 0, key-1)\n self.rotateArray (arr, key, n-1)\n self.rotateArray (arr, 0, n-1)\n\nif __name__ == '__main__':\n arr = [1, 2, 3, 4, 5, 6, 7]\n reversal = ReversalAlgorithm ()\n reversal.leftReversal (arr, 2)\n print(f'The array after rotation based on given key is {arr}')\n\n","repo_name":"kanuarj/pythonDSA","sub_path":"Arrays/arrayRotations/02_reversalArrayRotation.py","file_name":"02_reversalArrayRotation.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26359228577","text":"import argparse\nimport pathlib\n\n# TODO support all types.\n# See \"Data Types in https://irsa.ipac.caltech.edu/applications/DDGEN/Doc/ipac_tbl.html\n# TODO support quoted values where commas can be used inside a value\n\n\ndef list_arg(s):\n return s.split(',')\n\ndef check_args(args):\n first_line_data = check_csvfile(args)\n # print(\"Number of columns in CSV: {}\\n\".format(len(first_line_data)))\n\n for i in (args.header, args.units, args.null):\n if i is not None:\n if len(i) != len(first_line_data):\n raise ValueError(\"Number of columns in file does not match with the arguments\")\n\n\ndef check_csvfile(args):\n filepath = args.csvfile\n if not filepath.exists():\n raise FileNotFoundError('File does not exist {}'.format(filepath))\n \n with open(filepath, 'r') as f:\n line = next(f)\n\n first_line = line.split(',')\n return first_line\n\n\ndef construct_line(line_data, max_lengths, in_between=\" \"):\n line = in_between\n for val, m in zip(line_data, max_lengths):\n space_bef = \" \"*((m - len(val))//2)\n space_after = \" \"*(m - len(val) - len(space_bef))\n line += space_bef + val + space_after + in_between\n return line\n\ndef adjust_max_lengths(lst, max_lengths):\n for i in range(len(max_lengths)):\n max_lengths[i] = max(max_lengths[i], len(lst[i]))\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Converts a CSV file to IPAC Table file.')\n parser.add_argument('csvfile', type=pathlib.Path, help='file to convert')\n parser.add_argument('--header',\n action='store',\n type=list_arg,\n help='comma-separated column names')\n parser.add_argument('--units',\n action='store',\n type=list_arg,\n help='units of the column values')\n parser.add_argument('--null',\n action='store',\n type=list_arg,\n help='null value specifiers')\n \n args = parser.parse_args()\n # print(args)\n\n check_args(args)\n \n with open(args.csvfile, 'r') as fin:\n header = next(fin).strip().split(',') if args.header is None else args.header\n max_lengths = list(map(len, header))\n types = [None] * len(header)\n for line_num, line in enumerate(fin):\n vals = line.split(',')\n if len(vals) != len(header):\n raise ValueError('Line {} contains {} columns!'.format(\n line_num if args.header is None else line_num + 1,\n len(vals)))\n\n adjust_max_lengths(vals, max_lengths)\n\n for i in 
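# A self-contained property check (added for illustration) of the same
# three-reversal rotation shown above: reversing the first d elements, then
# the remainder, then the whole array must equal plain slicing
# arr[d:] + arr[:d] for every d (after reducing d modulo len(arr)).
def rotate_by_reversal(arr, d):
    d %= len(arr)
    arr[:d] = arr[:d][::-1]   # reverse the first d elements
    arr[d:] = arr[d:][::-1]   # reverse the remaining elements
    arr.reverse()             # reverse the whole array

arr = [1, 2, 3, 4, 5, 6, 7]
for d in range(10):
    rotated = list(arr)
    rotate_by_reversal(rotated, d)
    assert rotated == arr[d % len(arr):] + arr[:d % len(arr)], d
print("three-reversal rotation matches slicing for all tested d")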
range(len(max_lengths)):\n was_double = types[i] == 'double'\n if types[i] != 'char':\n try:\n float(vals[i])\n types[i] = 'double'\n except Exception:\n types[i] = 'char'\n else:\n if not was_double:\n try:\n int(vals[i])\n types[i] = 'int'\n except Exception:\n pass\n\n adjust_max_lengths(types, max_lengths)\n \n\n with open(args.csvfile, 'r') as fin:\n if args.header is None:\n next(fin)\n\n print(construct_line(header, max_lengths, in_between=\"|\"))\n\n print(construct_line(types, max_lengths, in_between=\"|\"))\n\n if args.units is not None:\n adjust_max_lengths(args.units, max_lengths)\n print(construct_line(args.units, max_lengths))\n\n if args.null is not None:\n adjust_max_lengths(args.null, max_lengths)\n print(construct_line(args.null, max_lengths))\n\n for line in fin:\n print(construct_line(line.strip().split(','), max_lengths))\n\n\n","repo_name":"tungli/csv_to_tableIPAC.py","sub_path":"csv_to_table.py","file_name":"csv_to_table.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21929850487","text":"# -*- coding: utf-8 -*-\n# author: itimor\n\nimport os\n\nAPP_ENV = 'dev'\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '64318ob@vbou7h50)b0a_pfda4d$bw2nhl4h*m$qo0_e_fxw=658!z*x'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# sqlite\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, '../core.db'),\n }\n}\n\n# mysql\n# DATABASES = {\n# 'default': {\n# 'ENGINE': 'django.db.backends.mysql',\n# 'NAME': 'one',\n# 'USER': 'root',\n# 'PASSWORD': 'momo520',\n# 'HOST': '1.1.1.11',\n# 'OPTIONS': {\n# \"init_command\": \"SET foreign_key_checks=0;\",\n# }\n# }\n# }\n\n# 加载 mysql\n# import pymysql\n# pymysql.install_as_MySQLdb()\n","repo_name":"itimor/one-workflow","sub_path":"backend/core/settings/mac.py","file_name":"mac.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"77"} +{"seq_id":"21709230091","text":"\"\"\"\nTransport equation\nexplicit - Lax-Wendroff scheme\n\"\"\"\n\n# cython: it's possible to compile time-critical code using cython\n# cython: which will reduce pure calculation time by a factor of about 3-4\n# cython: to activate it, uncomment the following import and some code below (see comments)\n\n#import transp_LaxWendroff_cython\n#import numpy as np\n\n\ndef calc_transp_lw(pd, m, to_step):\n\n # a,b ... temporary variable for better readability\n a = 0.5 * pd.CFL2 + pd.NE\n b = 0.5 * pd.CFL\n\n stable_calc = 2.0 * a\n m.is_stable = (stable_calc<=1)\n\n # cython: if you want to use cython, activate this ...\n #u_1 = transp_LaxWendroff_cython.calc_transp_lw_cython(pd.CFL, pd.CFL2, pd.v, a, b, to_step, pd.u_00.copy())\n\n # cython: ... 
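# A compact sketch (added for illustration; RANKS and the helper names are
# made up here) of the column-type promotion the converter above applies:
# a column stays 'int' only while every value parses as int, widens to
# 'double' if any value needs float, and collapses to 'char' otherwise.
# Types only ever widen along int -> double -> char and never narrow.
RANKS = {"int": 0, "double": 1, "char": 2}

def value_type(v):
    try:
        int(v)
        return "int"
    except ValueError:
        try:
            float(v)
            return "double"
        except ValueError:
            return "char"

def infer_column_type(values):
    col = "int"
    for v in values:
        t = value_type(v)
        if RANKS[t] > RANKS[col]:
            col = t  # widen, never narrow
    return col

print(infer_column_type(["1", "2", "3"]))    # int
print(infer_column_type(["1", "2.5", "3"]))  # double
print(infer_column_type(["1", "x", "3.5"]))  # char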
and deactivate code from here ...\n u_0 = pd.u_00.copy()\n u_1 = pd.u_00.copy()\n\n for n in range(0, to_step):\n\n if pd.v == 0.0:\n # only advection\n u_1[1:-1] += ( 1.0 + pd.CFL) *b * u_0[:-2]\\\n +( - pd.CFL2) * u_0[1:-1]\\\n +(-1.0 + pd.CFL) *b * u_0[2:]\n else:\n\n u_1[1:-1] = ( a +b ) * u_0[:-2]\\\n +(-2.0*a +1.0) * u_0[1:-1]\\\n +( a -b ) * u_0[2:]\n\n # boundary conditions\n u_1[0] = pd.bc_upstream(n*pd.dt)\n u_1[-1] = pd.bc_downstream(n*pd.dt)\n\n u_0 = u_1 # calculated values are input values for the next step\n # cython: ... to here\n\n m.u_1 = u_1.copy()\n m.u_final = u_1.copy()","repo_name":"alex-tug/NumFlowEduc","sub_path":"src/calc_modules/Transp_LaxWendroff.py","file_name":"Transp_LaxWendroff.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"816024299","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport sys\nfrom Attraction import Attraction\nfrom Brands import Brands\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\n\ndef main():\n address_old = '192.168.199.147'\n port_old = 27017\n\n address_new = '192.168.199.147'\n port_new = 27017\n\n print('convert latestattractions to attraction.')\n Attraction.convert_attraction(address_old, port_old, address_new, port_new)\n\n print('convert brands to brand.')\n Brands.convert_brands(address_old, port_old, address_new, port_new)\n\n print(\"OK\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hezhensong/MongoConvertor","sub_path":"travel3/main_hezhensong.py","file_name":"main_hezhensong.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"2675917202","text":"## making a deck of cards, including a shuffle feature\r\n\r\nimport random\r\nfrom random import shuffle\r\n\r\nsuits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']\r\nvalues = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']\r\n\r\nclass Card:\r\n def __init__(self, suit, value):\r\n self.suit = suit\r\n self.value = value\r\n \r\n def show(self):\r\n print (\"{} of {}\".format(self.value, self.suit))\r\n\r\ncard = Card(\"Card\", 6)\r\ncard.show()\r\n\r\nclass Deck:\r\n def __init__(self):\r\n self.cards = []\r\n self.pile()\r\n \r\n def pile(self):\r\n for i in suits:\r\n for s in values:\r\n self.cards.append(Card(i, s))\r\n\r\n def show(self):\r\n for a in self.cards:\r\n a.show()\r\n\r\n def shuffle(self):\r\n for i in range(len(self.cards) - 1, 0, -1):\r\n r = random.randint(0, i)\r\n self.cards[i], self.cards[r] = self.cards[r], self.cards[i]\r\n\r\ndeck = Deck()\r\ndeck.shuffle()\r\ndeck.show() \r\n","repo_name":"Scare966/python-practice-problems","sub_path":"deck_of_cards.py","file_name":"deck_of_cards.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8174513923","text":"# encoding: utf-8\ntry:\n import os\n import argparse\n\n import torch\n import numpy as np\n import torch.nn.functional as F\n\n from sklearn.mixture import GaussianMixture\n from torch.optim.lr_scheduler import StepLR\n from tqdm import tqdm\n from itertools import chain\n from sklearn.metrics import normalized_mutual_info_score as NMI\n\n from fsvae.datasets import dataset_list, get_dataloader\n from fsvae.config import RUNS_DIR, DATASETS_DIR, DEVICE, DATA_PARAMS\n from fsvae.model import Generator, GMM, Encoder\n from fsvae.utils import 
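# A self-contained sketch (added for illustration; grid and parameters are
# made up) of the explicit Lax-Wendroff update for the 1D advection equation
# u_t + a*u_x = 0 on a periodic grid, with CFL = a*dt/dx:
#   u_i^{n+1} = u_i - (CFL/2)*(u_{i+1} - u_{i-1})
#                   + (CFL^2/2)*(u_{i+1} - 2*u_i + u_{i-1})
# The scheme is stable for |CFL| <= 1, matching the stability check above.
import numpy as np

nx, steps, cfl = 200, 100, 0.8
x = np.linspace(0.0, 1.0, nx, endpoint=False)
u = np.exp(-200.0 * (x - 0.3) ** 2)  # Gaussian pulse as initial condition

for _ in range(steps):
    up = np.roll(u, -1)  # u_{i+1} (periodic boundary)
    um = np.roll(u, 1)   # u_{i-1} (periodic boundary)
    u = u - 0.5 * cfl * (up - um) + 0.5 * cfl ** 2 * (up - 2.0 * u + um)

print("total mass after transport:", float(u.sum()))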
save_images, cluster_acc, gmm_Loss, mse\n\nexcept ImportError as e:\n print(e)\n raise ImportError\n\n\ndef main():\n global args\n parser = argparse.ArgumentParser(description=\"Convolutional NN Training Script\")\n parser.add_argument(\"-r\", \"--run_name\", dest=\"run_name\", default=\"fsvae\", help=\"Name of training run\")\n parser.add_argument(\"-n\", \"--n_epochs\", dest=\"n_epochs\", default=500, type=int, help=\"Number of epochs\")\n parser.add_argument(\"-o\", \"--outlier\", dest=\"outlier_rate\", default=0, type=float, help=\"ratios of outlier\")\n parser.add_argument(\"-s\", \"--dataset_name\", dest=\"dataset_name\", default='mnist', choices=dataset_list,\n help=\"Dataset name\")\n parser.add_argument(\"-v\", \"--version_name\", dest=\"version_name\", default=\"v1\")\n args = parser.parse_args()\n\n run_name = args.run_name\n dataset_name = args.dataset_name\n\n # make directory\n run_dir = os.path.join(RUNS_DIR, dataset_name, run_name, args.version_name)\n data_dir = os.path.join(DATASETS_DIR, dataset_name)\n imgs_dir = os.path.join(run_dir, 'images')\n models_dir = os.path.join(run_dir, 'models')\n log_path = os.path.join(run_dir, 'logs')\n\n os.makedirs(data_dir, exist_ok=True)\n os.makedirs(run_dir, exist_ok=True)\n os.makedirs(imgs_dir, exist_ok=True)\n os.makedirs(models_dir, exist_ok=True)\n os.makedirs(log_path, exist_ok=True)\n\n # -----train-----\n # train detail var\n n_epochs = args.n_epochs\n b1 = 0.5\n b2 = 0.99\n\n data_params = DATA_PARAMS[dataset_name]\n train_batch_size, latent_dim, c_feature, picture_size, cshape, data_size, train_lr, test_num, n_cluster = data_params\n print(data_params)\n\n # net\n gen = Generator(latent_dim=latent_dim, x_shape=picture_size, cshape=cshape)\n gmm = GMM(n_cluster=n_cluster, n_features=c_feature)\n encoder = Encoder(\n input_channels=picture_size[0],\n output_channels=latent_dim,\n cshape=cshape,\n c_feature=c_feature,\n r=9,\n adding_outlier=(args.outlier_rate > 0.05)\n )\n\n # parallel\n if torch.cuda.device_count() > 1:\n print(\"this GPU have {} core\".format(torch.cuda.device_count()))\n\n # set device: cuda or cpu\n gen.to(DEVICE)\n encoder.to(DEVICE)\n gmm.to(DEVICE)\n\n # optimization\n gen_enc_gmm_ops = torch.optim.Adam(chain(\n gen.parameters(),\n encoder.parameters(),\n gmm.parameters(),\n ), lr=train_lr, betas=(b1, b2))\n lr_s = StepLR(gen_enc_gmm_ops, step_size=10, gamma=0.95)\n\n dataloader = get_dataloader(dataset_path=data_dir, dataset_name=dataset_name,\n batch_size=train_batch_size, outlier_rate=args.outlier_rate)\n i_dataloader = get_dataloader(dataset_path=data_dir, dataset_name=dataset_name,\n batch_size=30000, shuffle=False, outlier_rate=0)\n\n # =============================================================== #\n # =========================initialization======================== #\n # =============================================================== #\n best_model = None\n best_score = 0\n print('searching best Gaussian mixture prior...')\n datas, labels = i_dataloader.dataset.data, i_dataloader.dataset.targets\n if dataset_name in ['cifar10', 'ytf', 'gtsrb']:\n datas = datas.permute((0, 3, 1, 2)).to(DEVICE) / 255.0\n else:\n datas = datas.unsqueeze(1).to(DEVICE) / 255.0\n\n Z = []\n Y = []\n with torch.no_grad():\n z, _ = encoder(datas)\n Z.append(z[2])\n Y.append(labels)\n\n Z = torch.cat(Z, 0).detach().cpu().numpy()\n Y = torch.cat(Y, 0).detach().numpy()\n for i in range(30):\n _gmm = GaussianMixture(n_components=n_cluster, covariance_type='diag')\n pre = _gmm.fit_predict(Z)\n acc = cluster_acc(pre, Y)[0] 
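# The initialization loop above keeps the best of several GaussianMixture
# fits, scored against ground-truth labels. A label-free variant of the same
# restart pattern (added for illustration; data here is synthetic) keeps the
# fit with the highest mean log-likelihood instead:
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
Z = np.vstack([rng.randn(200, 2) + c for c in ([0, 0], [4, 4], [0, 5])])

best_gmm, best_ll = None, -np.inf
for seed in range(10):
    gmm = GaussianMixture(n_components=3, covariance_type="diag",
                          random_state=seed).fit(Z)
    ll = gmm.score(Z)  # mean per-sample log-likelihood
    if ll > best_ll:
        best_gmm, best_ll = gmm, ll
print("best mean log-likelihood:", best_ll)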
* 100\n\n if best_score < acc:\n best_score = acc\n best_model = _gmm\n datas = datas.cpu()\n torch.cuda.empty_cache()\n del datas\n\n print('best accuracy is: {:.4f}'.format(best_score))\n gmm.pi_.data = torch.from_numpy(best_model.weights_).to(DEVICE).float()\n gmm.mu_c.data = torch.from_numpy(best_model.means_).to(DEVICE).float()\n gmm.log_sigma2_c.data = torch.log(torch.from_numpy(best_model.covariances_).to(DEVICE).float())\n\n # =============================================================== #\n # ============================training=========================== #\n # =============================================================== #\n print('begin training...')\n epoch_bar = tqdm(range(0, n_epochs))\n best_acc, best_nmi, best_ite = 0, 0, 0\n gen_weight = 0.15\n for epoch in epoch_bar:\n\n g_t_loss = 0\n for index, (real_images, augmented_images, target) in enumerate(dataloader):\n real_images, augmented_images, target = real_images.to(DEVICE), \\\n augmented_images.to(DEVICE), \\\n target.to(DEVICE)\n\n gen.train()\n gmm.train()\n encoder.train()\n gen_enc_gmm_ops.zero_grad()\n\n original_z, augmented_z = encoder(real_images, augmented_images, argument=(epoch > 50))\n fake_images = gen(original_z[0], gen_weight * original_z[1])\n\n rec_loss = torch.mean(\n torch.sum(\n F.binary_cross_entropy(fake_images, real_images, reduction='none'), dim=[1, 2, 3]\n )\n )\n\n augmented_loss = mse(original_z[2], augmented_z[2]) + \\\n mse(original_z[3], augmented_z[3])\n\n c_loss = mse(gmm.get_asign(original_z[0])[0], gmm.get_asign(augmented_z[0])[0])\n\n kl_loss = torch.mean(\n -0.5 * torch.sum(\n 1 + original_z[5] - original_z[3] ** 2 - original_z[5].exp(), dim=1\n ), dim=0\n )\n\n kls_loss = gmm_Loss(original_z[0], original_z[2], original_z[4], gmm)\n\n # the loss weight of augmentation module is used for all data sets.\n g_loss = 10 * (augmented_loss + c_loss) + kls_loss + rec_loss + kl_loss\n\n g_loss.backward()\n\n gen_enc_gmm_ops.step()\n g_t_loss += g_loss\n\n # save cheekpoint model\n if (epoch + 1) % 20 == 0:\n cheek_path = os.path.join(models_dir, \"cheekpoint_{}\".format(epoch))\n os.makedirs(cheek_path, exist_ok=True)\n torch.save(gen.state_dict(), os.path.join(cheek_path, 'gen.pkl'))\n torch.save(encoder.state_dict(), os.path.join(cheek_path, 'enc.pkl'))\n torch.save(gmm.state_dict(), os.path.join(cheek_path, 'gmm.pkl'))\n\n print(\n 'rec_loss: {:.4f}, a_loss: {:.4f}, c_loss: {:.4f}, kls_loss: {:.4f}, kl_loss: {:.4f}'.format(\n rec_loss, augmented_loss, c_loss, kls_loss, kl_loss\n )\n )\n lr_s.step()\n\n # =============================================================== #\n # ==============================test============================= #\n # =============================================================== #\n gen.eval()\n encoder.eval()\n gmm.eval()\n\n with torch.no_grad():\n Z = []\n Y = []\n for _data, _, _target in i_dataloader:\n z, _ = encoder(_data.to(DEVICE))\n Z.append(z[0])\n Y.append(_target)\n\n Z = torch.cat(Z, 0)\n Y = torch.cat(Y, 0).detach().numpy()\n _pred = gmm.predict(Z)\n _acc = cluster_acc(_pred, Y)[0] * 100\n _nmi = NMI(_pred, Y)\n\n t_pred = _pred[:-test_num]\n t_acc = cluster_acc(t_pred, Y[:-test_num])[0] * 100\n t_nmi = NMI(t_pred, Y[:-test_num])\n\n if best_acc < t_acc:\n best_acc, best_nmi, best_ite = t_acc, t_nmi, epoch\n\n stack_images = None\n for k in range(n_cluster):\n\n z1, z2 = gmm.sample_by_k(k, latent_dim=latent_dim)\n fake_images = gen(z1, gen_weight * z2)\n\n if stack_images is None:\n stack_images = fake_images[:n_cluster].data.cpu().numpy()\n 
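# The kl_loss term used in the training loop above is the closed form of
# KL(N(mu, sigma^2) || N(0, 1)) summed over latent dimensions:
#   KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# A tiny numerical check of that identity (added for illustration, with
# made-up tensors), comparing against torch.distributions:
import torch
from torch.distributions import Normal, kl_divergence

mu = torch.tensor([[0.5, -1.0]])
log_var = torch.tensor([[0.2, -0.3]])

kl_closed = -0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1)

p = Normal(mu, (0.5 * log_var).exp())  # sigma = exp(log_var / 2)
q = Normal(torch.zeros_like(mu), torch.ones_like(mu))
kl_ref = kl_divergence(p, q).sum(dim=1)

print(torch.allclose(kl_closed, kl_ref))  # True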
else:\n stack_images = np.vstack((stack_images, fake_images[:n_cluster].data.cpu().numpy()))\n stack_images = torch.from_numpy(stack_images)\n save_images(stack_images, imgs_dir, 'test_gen_{}'.format(epoch), nrow=n_cluster)\n\n logger = open(os.path.join(log_path, \"log.txt\"), 'a')\n logger.write(\n \"[FSVAE]: epoch: {}, g_loss: {:.4f}, acc: {:.4f}%, nmi: {:.4f}, t_acc: {:.4f}%, t_nmi: {:.4f}\\n\".format\n (\n epoch, g_t_loss / len(dataloader), _acc, _nmi, t_acc, t_nmi\n )\n )\n logger.close()\n print(\"[FSVAE]: epoch: {}, g_loss: {:.4f}, acc: {:.4f}%, nmi: {:.4f}, t_acc: {:.4f}%, t_nmi: {:.4f}\".format\n (\n epoch, g_t_loss / len(dataloader), _acc, _nmi, t_acc, t_nmi\n ))\n\n print('complete training...best_acc is: {:.4f}, best_nmi is: {:.4f}, iteration is: {}'.format(\n best_acc, best_nmi, best_ite,\n ))\n torch.save(gen.state_dict(), os.path.join(models_dir, 'gen.pkl'))\n torch.save(encoder.state_dict(), os.path.join(models_dir, 'enc.pkl'))\n torch.save(gmm.state_dict(), os.path.join(models_dir, 'gmm.pkl'))\n\n\nif __name__ == '__main__':\n\n main()\n","repo_name":"fwt-team/FSVAE","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10068,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"13262678559","text":"from .logger import Logger\nimport torch\nimport os\nfrom custom_pt_layers import pytorch_layers_map\nfrom custom_pt_layers import MaskedLinear\nfrom layers_modules import layers_modules_maps\nimport copy\nfrom .utils import test, weights_init, device, Mode, adjust_learning_rate\nfrom prettytable import PrettyTable\nimport time\nimport numpy as np\n\n\nclass ModelTrain:\n def __init__(self, model, criterion, optimizer, storage_parent_dir, input_size=None, debug=False, retrain_masked=False, other_model_parent_dir=None):\n \"\"\"initialization of ModelTrain object used to train and test both unmasked and masked model\n\n Arguments:\n model {models.Model} -- input model object that will be trained and sparsified\n criterion {nn.} -- loss function used during training\n optimizer {torch.optim} -- optimizer used during training\n storage_parent_dir {string} -- parent save directory used to save logs and trained models\n\n Keyword Arguments:\n input_size {tuple} -- size of input data to the model (default: {None})\n debug {bool} -- when set to true the solver will be verbose (default: {False})\n retrain_masked {bool} -- used to load a sparsified model with its original initialization and retrain it (default: {False})\n other_model_parent_dir {string} -- path of the folder having the model sparsified on another dataset that will be retrained / generalized(default: {None})\n \"\"\"\n self._logger = Logger.__call__(storage_parent_dir, debug).get_logger()\n self.model = model\n self.model_masked = None\n self.masking_indices = []\n self.criterion = criterion\n self.optimizer = optimizer\n self.epoch = 0\n self.storage_parent_dir = storage_parent_dir\n # usef for generalization with retrain masked\n self.other_model_parent_dir = other_model_parent_dir\n self._device = device\n self.input_size = input_size\n self.retrain_masked = retrain_masked\n self._init_models()\n\n def _init_models(self):\n parent_storage_dir = self.storage_parent_dir\n if self.retrain_masked and self.other_model_parent_dir is not None:\n parent_storage_dir = self.other_model_parent_dir\n masked_model_path = os.path.join(\n parent_storage_dir, self.model.name+'_'+Mode.MASK.name+'.pt')\n model_path = os.path.join(\n 
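# cluster_acc above is imported from fsvae.utils; a common implementation of
# this kind of clustering accuracy (assumed here, added for illustration)
# builds the confusion matrix between cluster ids and labels and picks the
# best one-to-one relabeling with the Hungarian algorithm:
import numpy as np
from scipy.optimize import linear_sum_assignment

def clustering_accuracy(pred, truth):
    pred, truth = np.asarray(pred), np.asarray(truth)
    k = max(pred.max(), truth.max()) + 1
    w = np.zeros((k, k), dtype=np.int64)
    for p, t in zip(pred, truth):
        w[p, t] += 1  # confusion counts
    rows, cols = linear_sum_assignment(-w)  # maximize matched counts
    return w[rows, cols].sum() / pred.size

print(clustering_accuracy([0, 0, 1, 1], [1, 1, 0, 0]))  # 1.0 up to relabeling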
self.storage_parent_dir, self.model.name+'.pt')\n if self.retrain_masked:\n # loading masked model initialization and setting layer importance\n model_init_path = os.path.join(\n parent_storage_dir, self.model.name+'_init.pt')\n self.load_model_init(model_init_path)\n # now based on the loaded init creating the masked version\n self.swap_pytorch_layers()\n model_masked_loaded = self.load_model_masked(masked_model_path)\n if not(model_masked_loaded):\n self._logger.exception(\n 'Masking indices not loaded for model {}'.format(self.model.name))\n raise Exception(\n 'Masking indices not set which is a critical bug')\n removed_neurons_percentage = self.swap_pytorch_layers(\n mask_indices=True)\n self._logger.info('Retraining masked model with {}% removed parameters from original model {}'.format(\n removed_neurons_percentage, self.model.name))\n else:\n if not(os.path.isfile(model_path)):\n self.reset_model()\n if os.path.isfile(masked_model_path):\n self.swap_pytorch_layers()\n self.load_model_masked(masked_model_path)\n if os.path.isfile(model_path):\n self.load_model(model_path)\n\n def reset_model(self):\n \"\"\"Reset model weights\n \"\"\" \n self.model.to(self._device)\n self.model.apply(weights_init)\n self._save_model_init()\n\n def _save_model_init(self):\n \"\"\"used to save model initialization\n \n Returns:\n bool -- return True on success\n \"\"\" \n try:\n torch.save(self.model.state_dict(), os.path.join(\n self.storage_parent_dir, self.model.name + '_init.pt'))\n return True\n except Exception as e:\n self._logger.exception(str(e))\n return False\n\n def load_model_init(self, path):\n \"\"\"load saved model initialization from specific path\n \n Arguments:\n path {string} -- path of the model's initialization\n \n Returns:\n bool -- return True on success\n \"\"\" \n try:\n checkpoint = torch.load(path, map_location=self._device)\n self.model.load_state_dict(checkpoint)\n self._logger.info(\n 'Model initialization {} loaded'.format(self.model.name))\n return True\n except Exception as e:\n self._logger.exception(str(e))\n return False\n\n def save_model(self, model, optimizer, prefix=''):\n \"\"\"save model state dict along with optimizer state to continue training on interruptions\n \n Arguments:\n model {models.Model} -- model used for training\n optimizer {torch.optim} -- optimizer used during training\n \n Keyword Arguments:\n prefix {str} -- prefix added during model save (default: {''})\n \n Returns:\n bool -- True on success\n \"\"\" \n try:\n torch.save({\n 'epoch': self.epoch + 1,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }, os.path.join(self.storage_parent_dir, model.name+prefix+'.pt'))\n self._logger.info('Model {} saved at epoch {}'.format(\n self.model.name, str(self.epoch)))\n return True\n except Exception as e:\n self._logger.exception(str(e))\n return False\n\n def load_model(self, path):\n \"\"\"used to load model from specific path\n \n Arguments:\n path {string} -- path of the checkpoint holding model and optimizer state dict\n \n Returns:\n bool -- True on success\n \"\"\" \n try:\n checkpoint = torch.load(path, map_location=self._device)\n self.model.load_state_dict(checkpoint['model_state_dict'])\n if self.optimizer is not None:\n self.optimizer.load_state_dict(\n checkpoint['optimizer_state_dict'])\n self.epoch = checkpoint['epoch']\n self._logger.info('Model {} loaded'.format(self.model.name))\n return True\n except Exception as e:\n self._logger.exception(str(e))\n return False\n\n def 
save_model_masked(self, suffix='_masked'):\n \"\"\"used to save the sparsified model\n \n Keyword Arguments:\n suffix {str} -- a suffix to be added to the model's name when saved (default: {'_masked'})\n \n Returns:\n bool -- True on success\n \"\"\" \n try:\n torch.save({\n 'model_state_dict': self.model_masked.state_dict(),\n 'masking_indices': self.masking_indices\n }, os.path.join(self.storage_parent_dir, self.model.name + suffix + '.pt'))\n return True\n except Exception as e:\n self._logger.exception(str(e))\n return False\n\n def load_model_masked(self, path):\n \"\"\"load saved masked model\n \n Arguments:\n path {string} -- path of the masked model's checkpoint\n \n Returns:\n bool -- True on success\n \"\"\" \n try:\n checkpoint = torch.load(path, map_location=self._device)\n self.model_masked.load_state_dict(checkpoint['model_state_dict'])\n self.masking_indices = checkpoint['masking_indices']\n self._logger.info(\n 'Masked Model {} loaded'.format(self.model_masked.name))\n self.model_masked.eval()\n return True\n except Exception as e:\n self._logger.exception(str(e))\n return False\n\n def swap_pytorch_layers(self, mask_indices=False):\n \"\"\"this function is used to replace dense layers with custom pytorch layers which allows sparsification \n custom modules are from custom_pt_layers\n \n Keyword Arguments:\n mask_indices {bool} -- when enabled will mask indices set in ModelTrain object (default: {False})\n \n Returns:\n float -- parameter removal percentage\n \"\"\" \n self.model_masked = copy.deepcopy(self.model)\n self.model_masked.name = self.model.name + '_' + Mode.MASK.name + '_model'\n mask_layer_indx = 0\n prev_input_size = self.input_size\n prev_layer_indx = 0\n first_layer_checked = False\n removed_params = 0\n original_model_num_params = 1\n if mask_indices:\n original_model_num_params = sum(\n p.numel() for p in self.model.parameters())\n removed_params = original_model_num_params\n for layer_indx, layer in enumerate(self.model):\n # checking for available keys that can be swapped in pytorch layers map\n for layer_type in pytorch_layers_map:\n if isinstance(layer, layer_type):\n # needs swapping in order to update image size\n if first_layer_checked:\n prev_input_size = self.model_masked[prev_layer_indx].output_size\n self.model_masked[layer_indx] = pytorch_layers_map[layer_type].copy_layer(\n layer, prev_input_size)\n if mask_indices and len(self.masking_indices) > 0 and layer_indx < len(self.model) - 1:\n if self.masking_indices[mask_layer_indx][0] == layer_indx:\n self.model_masked[layer_indx].mask_neurons(\n self.masking_indices[mask_layer_indx][1])\n removed_params -= len(\n self.masking_indices[mask_layer_indx][1]) * self.model_masked[layer_indx].in_features\n mask_layer_indx += 1\n prev_layer_indx = layer_indx\n first_layer_checked = True\n break\n self._logger.info(\n 'Finished creating masked version of model {}'.format(self.model.name))\n return 100 - ((removed_params / original_model_num_params) * 100)\n\n def set_mask_indices(self, mask_indices, suffix='_masked', save_masking_model=False):\n \"\"\"setting masked indices of the parameters that will be sparsified from the model\n \n Arguments:\n mask_indices {list} -- a list of numpy array holding indexes of neurons to be sparsified\n \n Keyword Arguments:\n suffix {str} -- suffix used to save the masked model (default: {'_masked'})\n save_masking_model {bool} -- when set to true the masked model will be saved (default: {False})\n \"\"\" \n self.masking_indices = mask_indices\n # create a new model 
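# The save_model/load_model pair above follows the standard torch checkpoint
# pattern: persist epoch, model state_dict and optimizer state_dict together
# so interrupted training can resume. A minimal self-contained round trip
# (added for illustration; the file name is made up):
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

torch.save({"epoch": 7,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": opt.state_dict()}, "ckpt.pt")

ckpt = torch.load("ckpt.pt", map_location="cpu")
model.load_state_dict(ckpt["model_state_dict"])
opt.load_state_dict(ckpt["optimizer_state_dict"])
print("resuming at epoch", ckpt["epoch"])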
copy\n self.swap_pytorch_layers(True)\n if save_masking_model:\n self.save_model_masked(suffix)\n\n def train(self, train_loader, val_loader=None, num_epochs=10, finetune_masked=False):\n \"\"\"train a model based on input train data loader and validation data loader\n \n Arguments:\n train_loader {torch.dataloader} -- a batch generator for training data\n \n Keyword Arguments:\n val_loader {torch.dataloader} -- a batch generator for validation data (default: {None})\n num_epochs {int} -- number of training epochs (default: {10})\n finetune_masked {bool} -- when set to True this function will finetune the masked model (default: {False})\n \"\"\" \n model = self.model\n optimizer = self.optimizer\n prefix = ''\n if self.retrain_masked or finetune_masked:\n model = self.model_masked\n optimizer = self.optimizer.__class__(\n model.parameters(), self.optimizer.param_groups[0]['lr'])\n self.epoch = 0\n if finetune_masked:\n prefix = '_finetuned'\n model.train()\n train_batch_time_list = []\n inference_batch_time_list = []\n self._logger.info(\n 'Started training Model {} training'.format(model.name))\n for epoch_indx in range(self.epoch, num_epochs):\n total_loss, total_err = 0., 0.\n for X, y in train_loader:\n start_batch_time = time.time()\n X, y = X.to(self._device), y.to(self._device)\n yp = model(X)\n loss = self.criterion(yp, y)\n optimizer.zero_grad()\n\n model.register_backward_hooks()\n loss.backward()\n optimizer.step()\n train_batch_time_list.append(time.time() - start_batch_time)\n\n total_err += (yp.max(dim=1)[1] != y).sum().item()\n total_loss += loss.item() * X.shape[0]\n adjust_learning_rate(optimizer, epoch_indx)\n self._logger.logging_loss(\n 'train', total_loss / len(train_loader.dataset), epoch_indx)\n if val_loader is not None:\n model.eval()\n start_eval_time = time.time()\n val_loss, _ = test(model, val_loader, self.criterion)\n inference_batch_time_list.append(\n (time.time() - start_eval_time) / len(val_loader))\n self._logger.logging_loss('val', val_loss, epoch_indx)\n model.train()\n self.epoch = epoch_indx\n self.save_model(model, optimizer, prefix=prefix)\n model.eval()\n model.unregister_backward_hooks()\n avg_time_per_batch = np.mean(train_batch_time_list)\n total_train_time = np.sum(train_batch_time_list)\n avg_inference_per_batch = np.mean(inference_batch_time_list)\n self._logger.info('Model {} took {} seconds per batch'.format(\n model.name, avg_time_per_batch))\n self._logger.info('Model {} took total {} seconds to train'.format(\n model.name, total_train_time))\n self._logger.info('Model {} took average {} seconds for inference per batch'.format(\n model.name, avg_inference_per_batch))\n self._logger.info('Finished Model {} training'.format(model.name))\n\n def print_results(self, train_loader, val_loader, test_loader, test_original_model=True, test_masked_model=True, save_heat_map=False, mode_name=''):\n \"\"\"used to print accuracy/loss results of the original and masked model on input datasets\n \n Arguments:\n train_loader {torch.dataloader} -- train data loader\n val_loader {torch.dataloader} -- validation data loader\n test_loader {torch.dataloader} -- test data loader\n \n Keyword Arguments:\n test_original_model {bool} -- a flag to test the original model (default: {True})\n test_masked_model {bool} -- a flag to test the masked model (default: {True})\n save_heat_map {bool} -- a flag to save the model heatmap on random images for conv models (default: {False})\n mode_name {str} -- name of the sparsification mode that can be MASK, CRITICAL, RANDOM 
(default: {''})\n \n Returns:\n string -- result table \n \"\"\" \n if not(test_masked_model) and not(test_original_model):\n self._logger.exception(\n 'Error when testing model no model is enabled')\n return\n col_names = ['Model Name']\n if train_loader is not None:\n col_names.append('Train l/acc')\n if val_loader is not None:\n col_names.append('Val l/acc')\n if test_loader is not None:\n col_names.append('Test l/acc')\n model_summary_table = PrettyTable(col_names)\n model_list = []\n if test_original_model and not(self.retrain_masked):\n model_list = [('Original Model ' + self.model.name, self.model)]\n if test_masked_model:\n if self.model_masked is not None:\n model_list.append(\n ('Masked Model ' + self.model_masked.name, self.model_masked))\n # saving heat map on some sample images\n if val_loader is not None and save_heat_map:\n for label in range(val_loader.dataset.n_classes):\n X, y = val_loader.dataset.sample_itm_class(label)\n for model in model_list:\n if hasattr(model[-1], 'get_heat_map'):\n # sample an image for class 0 for now\n img_path = model[-1].get_heat_map(\n X.clone(), y, self.storage_parent_dir, model[0] + '_' + mode_name)\n self._logger.info(\n 'Created Heat map at {}'.format(img_path))\n results = []\n for model in model_list:\n current_model_results_dict = {}\n loss_acc = [0] * (len(col_names) - 1)\n start_indx = 0\n if train_loader is not None:\n loss_acc[start_indx] = test(\n model[-1], train_loader, self.criterion)\n current_model_results_dict['loss_train'] = loss_acc[start_indx][0]\n current_model_results_dict['acc_train'] = loss_acc[start_indx][1]\n start_indx += 1\n if val_loader is not None:\n loss_acc[start_indx] = test(\n model[-1], val_loader, self.criterion)\n start_indx += 1\n if test_loader is not None:\n loss_acc[start_indx] = test(\n model[-1], test_loader, self.criterion)\n current_model_results_dict['loss_test'] = loss_acc[start_indx][0]\n current_model_results_dict['acc_test'] = loss_acc[start_indx][1]\n start_indx += 1\n resulting_info = [str(current_l_acc[0]) + ' / ' +\n str(current_l_acc[1]) for current_l_acc in loss_acc]\n model_summary_table.add_row([model[0]]+resulting_info)\n results.append(current_model_results_dict)\n self._logger.info(str(model_summary_table))\n return results\n","repo_name":"pherber3/DLDO-Project","sub_path":"Project copy/mip-for-ann-master/training/model_train.py","file_name":"model_train.py","file_ext":"py","file_size_in_byte":18724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29459720009","text":"def jerigonzo(string):\n jer=\"\"\n for letra in string:\n if letra not in \"AEIOUaeiou\":\n jer+=letra\n else:\n jer+=letra+\"p\"+letra\n return jer\nif __name__ == \"__main__\":\n palabra=input(\"Ingrese la palabra a tranformar: \")\n jerigonzo(palabra)\n ","repo_name":"pabloschwarzenberg/grader","sub_path":"tema4_ej3/tema4_ej3_23cd175780d5d75ed3fb07906fcb93a0.py","file_name":"tema4_ej3_23cd175780d5d75ed3fb07906fcb93a0.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"14948913016","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\n'''\n valid_book model\n'''\n\nimport sys\nfrom op_app.Model.base.defaultDbModelClass import DefaultDbModelClass\nfrom op_app.logger.log import dblog\n\n\nclass ValidBookModelClass(DefaultDbModelClass):\n\n def getBookId(self, id):\n sql = 'select book_id from valid_book where book_id=%s'\n try:\n ret = 
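# set_mask_indices above delegates the actual sparsification to the custom
# MaskedLinear layer. One simple way to emulate "removing" output neurons of
# a dense layer (added for illustration; not necessarily how custom_pt_layers
# implements it) is to zero the corresponding weight rows and bias entries:
import torch
import torch.nn as nn

def mask_output_neurons(layer: nn.Linear, indices):
    with torch.no_grad():
        layer.weight[indices, :] = 0.0  # each row feeds one output neuron
        if layer.bias is not None:
            layer.bias[indices] = 0.0

layer = nn.Linear(3, 4)
mask_output_neurons(layer, [1, 3])
out = layer(torch.randn(2, 3))
print(out[:, [1, 3]])  # masked outputs are exactly zero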
self._cursorQuery(sql, [id])\n            if len(ret) == 0:\n                return '-1'\n            else:\n                return ret[0][0]\n        except Exception as e:\n            dblog.error(\"[ERROR] Query error, Catch exception:[ %s ], file: [ %s ], line: [ %s ]\" % (e, __file__, sys._getframe().f_lineno))\n            return '-1'\n\n    def addBookId(self, id):\n        sql = '''insert into valid_book(`book_id`) values (%s)'''\n        try:\n            ret = self._cursorInsert(sql, [id])\n            return '1'\n        except Exception as e:\n            print(\"[ERROR] Query error, Catch exception:[ %s ], file: [ %s ], line: [ %s ]\" % (\n                e, __file__, sys._getframe().f_lineno))\n            return '-1'\n\n    def updateBook(self, id, pindao, title, book_type, is_free, valid):\n        sql = '''update valid_book set pindao=%s, title=%s, book_type=%s, is_free=%s, valid=%s where book_id=%s'''\n        try:\n            ret = self._cursorUpdate(sql, [pindao, title, book_type, is_free, valid, id])\n            return '1'\n        except Exception as e:\n            print(\"[ERROR] Update error, Catch exception:[ %s ], file: [ %s ], line: [ %s ]\" % (\n                e, __file__, sys._getframe().f_lineno))\n            return '-1'\n\n    def updateScrapyedStatus(self, book_id, result='SUCCESSED'):\n        if result == 'SUCCESSED':\n            val = 1\n        elif result == 'FAILED':\n            val = 3\n        elif result == 'RUNNING':\n            val = 2\n        else:\n            val = 4\n        sql = '''update valid_book set scrapyed=%s where book_id=%s'''\n        try:\n            ret = self._cursorUpdate(sql, [val, book_id])\n            return '1'\n        except Exception as e:\n            print(\"[ERROR] Update error, Catch exception:[ %s ], file: [ %s ], line: [ %s ]\" % (\n                e, __file__, sys._getframe().f_lineno))\n            return '-1'\n\n    def getUnCrapyedBookId(self): # returns the ID of one book that has not been crawled yet\n        sql = '''select book_id from valid_book where is_free=1 and valid=1 and scrapyed=0 order by book_id limit 1'''\n        try:\n            ret = self._cursorQuery(sql, [])\n            if len(ret) == 0:\n                return ''\n            else:\n                return ret[0][0]\n        except Exception as e:\n            print(\"[ERROR] Query error, Catch exception:[ %s ], file: [ %s ], line: [ %s ]\" % (\n                e, __file__, sys._getframe().f_lineno))\n            return '-1'\n\n    def getFailedAndUnknowStatusBookId(self): # gets the ID of one book whose status is unknown or whose crawl failed\n        sql = '''select book_id from valid_book where is_free=1 and valid=1 and (scrapyed=3 or scrapyed=4) order by book_id limit 1'''\n        try:\n            ret = self._cursorQuery(sql, [])\n            if len(ret) == 0:\n                return ''\n            else:\n                return ret[0][0]\n        except Exception as e:\n            print(\"[ERROR] Query error, Catch exception:[ %s ], file: [ %s ], line: [ %s ]\" % (\n                e, __file__, sys._getframe().f_lineno))\n            return '-1'\n\n","repo_name":"yezimai/v1","sub_path":"op_app/Model/validBookModelClass.py","file_name":"validBookModelClass.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"10230623493","text":"import argparse\r\nimport logging\r\nimport time\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\n\r\nfrom tf_pose.estimator import TfPoseEstimator\r\nfrom tf_pose.networks import get_graph_path, model_wh\r\n\r\nimport math\r\nimport sys\r\nimport pickle\r\n\r\nlogger = logging.getLogger('TfPoseEstimator-WebCam')\r\nlogger.setLevel(logging.DEBUG)\r\nch = logging.StreamHandler()\r\nch.setLevel(logging.DEBUG)\r\nformatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\r\nch.setFormatter(formatter)\r\nlogger.addHandler(ch)\r\n\r\nfps_time = 0\r\n\r\ndef find_point(pose, p):\r\n    for point in pose:\r\n        try:\r\n            body_part = point.body_parts[p]\r\n            return (int(body_part.x * width + 0.5), int(body_part.y * height + 0.5))\r\n        except:\r\n            return (0,0)\r\n    return (0,0)\r\n\r\ndef euclidian( point1, 
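# The model above consistently passes query parameters separately from the
# SQL string, which is what protects it from injection. The same pattern with
# the stdlib sqlite3 driver (added for illustration; table and data are made
# up, and MySQL drivers use %s placeholders where sqlite3 uses ?):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE valid_book (book_id TEXT, scrapyed INTEGER)")
conn.execute("INSERT INTO valid_book VALUES (?, ?)", ("42", 0))

# Parameters are bound by the driver, never interpolated into the string.
row = conn.execute(
    "SELECT book_id FROM valid_book WHERE book_id = ?", ("42",)).fetchone()
print(row)  # ('42',)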
point2):\r\n    return math.sqrt((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2 )\r\n\r\ndef angle_calc(p0, p1, p2 ):\r\n    '''\r\n        p1 is the center point from which we measure the angle between p0 and p2\r\n    '''\r\n    try:\r\n        a = (p1[0]-p0[0])**2 + (p1[1]-p0[1])**2\r\n        b = (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2\r\n        c = (p2[0]-p0[0])**2 + (p2[1]-p0[1])**2\r\n        angle = math.acos( (a+b-c) / math.sqrt(4*a*b) ) * 180/math.pi\r\n    except:\r\n        return 0\r\n    return int(angle)\r\ndef plank( a, b, c, d, e, f):\r\n    # There are ranges of angle and distance that characterize a plank. \r\n    '''\r\n        a and b are the angles of the hands\r\n        c and d are the angles of the legs\r\n        e and f are the distances from head to ankle, because in a plank this distance is maximal.\r\n    '''\r\n    if (a in range(50,100) or b in range(50,100)) and (c in range(135,175) or d in range(135,175)) and (e in range(50,250) or f in range(50,250)):\r\n        return True\r\n    return False\r\n\r\ndef draw_str(dst, xxx_todo_changeme, s, color, scale):\r\n    \r\n    (x, y) = xxx_todo_changeme\r\n    if (color[0]+color[1]+color[2]==255*3):\r\n        cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, scale, (0, 0, 0), thickness = 4, lineType=10)\r\n    else:\r\n        cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, scale, color, thickness = 4, lineType=10)\r\n    #cv2.line \r\n    cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, scale, (255, 255, 255), lineType=11)\r\n\r\ndef str2bool(v):\r\n    return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\r\n\r\n\r\nif __name__ == '__main__':\r\n    parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')\r\n    parser.add_argument('--camera', type=str, default=0)\r\n\r\n    parser.add_argument('--resize', type=str, default='0x0',\r\n                        help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')\r\n    parser.add_argument('--resize-out-ratio', type=float, default=4.0,\r\n                        help='if provided, resize heatmaps before they are post-processed. 
default=1.0')\r\n\r\n parser.add_argument('--model', type=str, default='cmu', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')\r\n parser.add_argument('--show-process', type=bool, default=False,\r\n help='for debug purpose, if enabled, speed for inference is dropped.')\r\n \r\n parser.add_argument('--tensorrt', type=str, default=\"False\",\r\n help='for tensorrt process.')\r\n parser.add_argument('--save-txt')\r\n args = parser.parse_args()\r\n \r\n# print(\"mode 0: Only Pose Estimation \\nmode 1: Planking/Push up Detection\")\r\n# mode = int(input(\"Enter a mode : \"))\r\n \r\n\r\n# logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))\r\n w, h = model_wh(args.resize)\r\n if w > 0 and h > 0:\r\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h), trt_bool=str2bool(args.tensorrt))\r\n else:\r\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368), trt_bool=str2bool(args.tensorrt))\r\n# logger.debug('cam read+')\r\n cam = cv2.VideoCapture(args.camera)\r\n ret_val, image = cam.read()\r\n# logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))\r\n global height,width\r\n orange_color = (0,140,255)\r\n frame_no = 0\r\n os.mkdir(args.save_txt)\r\n file_name = args.camera.split('/')[-1].split('.')[0]\r\n while True:\r\n ret_val, image = cam.read()\r\n frame_no += 1\r\n# logger.debug('image process+')\r\n humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)\r\n pose = humans\r\n with open(args.save_txt+'/'+file_name+'_'+str(frame_no)+'.pkl', 'wb') as f:\r\n \tpickle.dump(pose, f)\r\n# logger.debug('postprocess+')\r\n image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)\r\n height,width = image.shape[0],image.shape[1]\r\n \r\n# logger.debug('show+')\r\n cv2.putText(image,\r\n \"FPS: %f\" % (1.0 / (time.time() - fps_time)),\r\n (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\r\n (0, 255, 0), 2)\r\n cv2.imshow('tf-pose-estimation result', image)\r\n fps_time = time.time()\r\n if cv2.waitKey(1) == 27:\r\n break\r\n# logger.debug('finished+')\r\n cv2.destroyAllWindows()\r\n","repo_name":"TechChandu/Project-Thesis","sub_path":"Pose Estimation/tf-pose-estimation/run_webcam.py","file_name":"run_webcam.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38133654482","text":"# Original code from https://github.com/nicksandau/GIFImage_ext/blob/master/GIFImage_ext.py\n\nfrom PIL import Image\nimport pygame\nfrom pygame.locals import SRCALPHA\nimport time\n\n\nclass GIFImage(object):\n def __init__(self, filename):\n self.filename = filename\n self.image = Image.open(filename)\n self.original_size = self.image.size\n#Added by NS *********************\n #self.frames = []\n self.fps_scale = 1\n self.img_scale = 1\n#**********************************\n self.get_frames()\n\n self.cur = 0\n self.ptime = time.time()\n\n self.running = True\n self.breakpoint = len(self.frames)-1\n self.startpoint = 0\n self.reversed = False\n\n def get_rect(self):\n return pygame.rect.Rect((0, 0), self.image.size)\n\n def get_frames(self):\n image = self.image\n #Added by NS ************\n self.frames = []\n #*************************\n pal = image.getpalette()\n base_palette = []\n for i in range(0, len(pal), 3):\n rgb = pal[i:i+3]\n base_palette.append(rgb)\n\n all_tiles = []\n try:\n while 1:\n if not image.tile:\n image.seek(0)\n if image.tile:\n 
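# angle_calc above is the law of cosines in disguise: with a = |p1-p0|^2,
# b = |p1-p2|^2 and c = |p0-p2|^2, the angle at the middle point p1 satisfies
# cos(theta) = (a + b - c) / (2*sqrt(a*b)) = (a + b - c) / sqrt(4*a*b).
# A standalone check on a right angle (added for illustration):
import math

def angle_at(p0, p1, p2):
    a = (p1[0] - p0[0]) ** 2 + (p1[1] - p0[1]) ** 2
    b = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
    c = (p2[0] - p0[0]) ** 2 + (p2[1] - p0[1]) ** 2
    return math.degrees(math.acos((a + b - c) / math.sqrt(4 * a * b)))

print(angle_at((1, 0), (0, 0), (0, 1)))  # 90.0 degrees at the origin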
all_tiles.append(image.tile[0][3][0])\n image.seek(image.tell()+1)\n except EOFError:\n image.seek(0)\n\n all_tiles = tuple(set(all_tiles))\n\n try:\n while 1:\n try:\n duration = image.info[\"duration\"]\n except Exception as e:\n print(\"Error '{0}' occurred. Arguments {1}.\",\n e.message, e.args)\n duration = 100\n\n duration *= .001 # convert to milliseconds!\n\n #Added by NS ************\n duration *= self.fps_scale\n #*************************\n\n cons = False\n\n x0, y0, x1, y1 = (0, 0) + image.size\n if image.tile:\n tile = image.tile\n else:\n image.seek(0)\n tile = image.tile\n if len(tile) > 0:\n x0, y0, x1, y1 = tile[0][1]\n\n if all_tiles:\n if all_tiles in ((6,), (7,)):\n cons = True\n pal = image.getpalette()\n palette = []\n for i in range(0, len(pal), 3):\n rgb = pal[i:i+3]\n palette.append(rgb)\n elif all_tiles in ((7, 8), (8, 7)):\n pal = image.getpalette()\n palette = []\n for i in range(0, len(pal), 3):\n rgb = pal[i:i+3]\n palette.append(rgb)\n else:\n palette = base_palette\n else:\n palette = base_palette\n\n pi = pygame.image.fromstring(\n image.tobytes(), image.size, image.mode)\n pi.set_palette(palette)\n if \"transparency\" in image.info:\n pi.set_colorkey(image.info[\"transparency\"])\n pi2 = pygame.Surface(image.size, SRCALPHA)\n if cons:\n for i in self.frames:\n pi2.blit(i[0], (0, 0))\n pi2.blit(pi, (x0, y0), (x0, y0, x1-x0, y1-y0))\n\n self.frames.append([pi2, duration])\n image.seek(image.tell()+1)\n except EOFError:\n pass\n\n def render(self, screen, pos):\n if self.running:\n if time.time() - self.ptime > self.frames[self.cur][1]:\n if self.reversed:\n self.cur -= 1\n if self.cur < self.startpoint:\n self.cur = self.breakpoint\n else:\n self.cur += 1\n if self.cur > self.breakpoint:\n self.cur = self.startpoint\n\n self.ptime = time.time()\n #Added by NS **************************************\n if self.img_scale == 1:\n surf = self.frames[self.cur][0]\n else:\n surf = pygame.transform.scale(self.frames[self.cur][0],\n (int(self.image.width * self.img_scale),\n int(self.image.height * self.img_scale)))\n screen.blit(surf, pos)\n #screen.blit(self.frames[self.cur][0], pos)\n #***************************************************\n\n def seek(self, num):\n self.cur = num\n if self.cur < 0:\n self.cur = 0\n if self.cur >= len(self.frames):\n self.cur = len(self.frames)-1\n\n def set_bounds(self, start, end):\n if start < 0:\n start = 0\n if start >= len(self.frames):\n start = len(self.frames) - 1\n if end < 0:\n end = 0\n if end >= len(self.frames):\n end = len(self.frames) - 1\n if end < start:\n end = start\n self.startpoint = start\n self.breakpoint = end\n\n def pause(self):\n self.running = False\n\n#added by NS ********************************\n def next_frame(self):\n if self.running:\n self.pause()\n else:\n self.cur += 1\n if self.cur > self.breakpoint:\n self.cur = self.startpoint\n\n def prev_frame(self):\n if self.running:\n self.pause()\n else:\n self.cur -= 1\n if self.cur < 0:\n self.cur = self.breakpoint\n\n def slow_down(self):\n self.fps_scale += .05 if self.fps_scale != .01 else .04\n self.get_frames()\n self.seek(self.cur)\n\n def speed_up(self):\n if self.fps_scale - .05 <= 0:\n self.fps_scale = .01\n else:\n self.fps_scale -= .25\n self.get_frames()\n self.seek(self.cur)\n\n def scale(self, scale_factor):\n self.img_scale += scale_factor\n\n def reset_scale(self):\n self.img_scale = 1\n#*********************************************\n\n def play(self):\n self.running = True\n\n def rewind(self):\n self.seek(0)\n\n def 
fastforward(self):\n self.seek(self.length()-1)\n\n def get_height(self):\n return self.image.size[1]\n\n def get_width(self):\n return self.image.size[0]\n\n def get_size(self):\n return self.image.size\n\n def length(self):\n return len(self.frames)\n\n def reverse(self):\n self.reversed = not self.reversed\n\n def reset(self):\n self.cur = 0\n self.ptime = time.time()\n self.reversed = False\n\n def copy(self):\n new = GIFImage(self.filename)\n new.running = self.running\n new.breakpoint = self.breakpoint\n new.startpoint = self.startpoint\n new.cur = self.cur\n new.ptime = self.ptime\n new.reversed = self.reversed\n #Added by NS ****\n new.fps_scale = self.fps_scale\n #*****************\n return new\n","repo_name":"sajjadium/ctf-archives","sub_path":"ctfs/HITCON/2021/rev/baba-is-rev/BabaGUI/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":7253,"program_lang":"python","lang":"en","doc_type":"code","stars":490,"dataset":"github-code","pt":"77"} +{"seq_id":"17080921002","text":"from lino import ad, _\n\n\nclass Plugin(ad.Plugin):\n \"See :doc:`/dev/plugins`.\"\n\n verbose_name = _(\"Concepts\")\n\n def setup_main_menu(self, site, profile, m):\n m = m.add_menu(self.app_label, self.verbose_name)\n m.add_action('concepts.Concepts')\n\n\n","repo_name":"amir17688/google_data_p2","sub_path":"86898___init__.py_C__Users_user_Desktop_data_2_data_google_data_lsaffre_lino_lino_modlib_concepts.py","file_name":"86898___init__.py_C__Users_user_Desktop_data_2_data_google_data_lsaffre_lino_lino_modlib_concepts.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73579534649","text":"from odoo import api, fields, models, _\n\nclass PurchaseRequestHistory(models.TransientModel):\n\t_name = 'purchase.request.history'\n\t_description = 'Purchase Request History'\n\n\n\tdate = fields.Datetime(string='From(Request Date)', help='Choose a date to get requests from that date', default=fields.Datetime.now)\n\n\tdef open_table(self):\n\t\tself.ensure_one()\n\n\t\ttree_view_id = self.env.ref('kolarequisition.requisition_line_view_tree').id\n\t\tform_view_id = self.env.ref('kolarequisition.requisition_line_form').id\n\t\t#we pass 'to_date' in the context so that the requests will be computed accross \n\t\t#till the to date and the state be checked as well\n\n\t\taction = {\n\t\t\t'type': 'ir.actions.act_window',\n\t\t\t'views': [(tree_view_id, 'tree'), (form_view_id, 'form')],\n\t\t\t'view_mode': 'tree,form',\n\t\t\t'name': _('Request lines'),\n\t\t\t'res_model': 'kola.requisition.lines',\n\t\t\t'domain': \"[('status', 'in', ['validate'])]\",\n\t\t\t'context': dict(self.env.context, create_date=self.date),\n\t\t}\n\t\treturn action\n\t\t\n\n\n","repo_name":"madrara256/e-procurement","sub_path":"kolarequisition/wizard/purchase_request_history.py","file_name":"purchase_request_history.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"6096066638","text":"# Create three variables and attach a value to each\npeople = 20\ncats = 30\ndogs = 15\n\n# if statement, checks whether there are less people than cats\nif people < cats:\n # prints result\n print(\"Too many cats! The world is doomed!\")\n\n# if statement, checks whether there are more cats than people\nif people > cats:\n # prints result\n print(\"Not many Cats! 
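# The frame loop in GIFImage.get_frames above relies on Pillow's seek/tell
# protocol: seek(tell()+1) advances one frame and raises EOFError past the
# last one, and info["duration"] carries the per-frame delay in milliseconds.
# A minimal sketch (added for illustration; the file name is made up):
from PIL import Image

durations = []
with Image.open("anim.gif") as im:
    try:
        while True:
            durations.append(im.info.get("duration", 100))  # ms, 100 fallback
            im.seek(im.tell() + 1)
    except EOFError:
        pass  # ran past the final frame
print(len(durations), "frames,", sum(durations) / 1000.0, "seconds total")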
The world is saved!\")\n\n# if statement, check if more dogs than people\nif people < dogs:\n # print result\n print(\"The world is drooled on!\")\n\n# if statement, check if more people than dogs\nif people > dogs:\n # print result\n print(\"The world is dry!\")\n\n# add an increment of 5 to dog value\ndogs += 5\n\n# if statement, check whether people are greater than or equal to dogs\nif people >= dogs:\n # print result\n print(\"People are greater than or equal to dogs.\")\n\n# if statement, check whether people are less than or equal to dogs\nif people <= dogs:\n # print result\n print(\"People are less than or equal to dogs.\")\n\n# if equal statement, checks if people are dogs\nif people == dogs:\n # print result\n print(\"People are dogs\")\n\n\n# 1. What do you think the `if` does to the code below it?\n##### It indents the code, checks statement\n\n# 2. Why does the code under the `if` need to be indented four spaces?\n##### This allows the computer to read the return/print statement, else it cannot distinguish it from the other statements\n","repo_name":"michodgs25/python-exercises","sub_path":"exercises/if-statement.py","file_name":"if-statement.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"23627808029","text":"from copy import copy\nfrom bfish import *\nfrom util import *\n\ndef OFB(bytes_array, BLOCK_SIZE) -> bytes:\n init_block = copy(init)\n result = []\n \n for i in range(0, len(bytes_array), BLOCK_SIZE):\n block = bytes_array[i:i+BLOCK_SIZE]\n enc_init_block = cipher.encrypt_block(init_block)\n \n gamma = bytes(enc_init_block[:BLOCK_SIZE])\n init_block = update_init(init_block, gamma) \n \n block = bytes(a ^ b for (a, b) in zip(block, gamma))\n \n result.extend(block)\n \n return bytes(result)","repo_name":"vnrdd/ofb-cfb","sub_path":"ofb.py","file_name":"ofb.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"33045874624","text":"from SRCNN.srcnn_arch import load_net\nimport utils\nimport numpy as np\nimport onnxruntime\nimport cv2\n\ndef test_model(img_path,weight_path = \"SRCNN/srcnn.pth\"):\n net = load_net(weight_path)\n img = utils.read_img(img_path)\n out = net(img.unsqueeze(0))\n utils.tensor2img(out[0],\"sr_result.png\")\n\ndef test_onnx(img_path,onnx_path):\n input_img = cv2.imread(img_path).astype(np.float32)\n input_img = np.transpose(input_img, [2, 0, 1]) \n input_img = np.expand_dims(input_img, 0)\n \n ort_session = onnxruntime.InferenceSession(onnx_path,providers=[\"CPUExecutionProvider\"]) \n ort_inputs = {'input': input_img} \n ort_output = ort_session.run(['output'], ort_inputs)[0] \n \n ort_output = np.squeeze(ort_output, 0) \n ort_output = np.clip(ort_output, 0, 255) \n ort_output = np.transpose(ort_output, [1, 2, 0]).astype(np.uint8) \n cv2.imwrite(\"face_ort.png\", ort_output)\n\nif __name__ == \"__main__\":\n img_path = \"image/face.png\"\n onnx_path = \"SRCNN/srcnn_sim.onnx\"\n # test_model(img_path)\n test_onnx(img_path,onnx_path)","repo_name":"cslvjt/modelDeploy","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36316963051","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nclass 
ModuleDocFragment(object):\n\n DOCUMENTATION = r\"\"\"\noptions:\n obj_id:\n type: int\n description: i-doit object id\n required: true\n\"\"\"\n","repo_name":"ScaleUp-Technologies/ansible-i-doit","sub_path":"plugins/doc_fragments/category_options.py","file_name":"category_options.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"29325838709","text":"#Aprobación de créditos\ningreso=int(input(\"Ingrese el ingreso del cliente (en pesos): \"))\nano_nacimiento=int(input(\"Ingrese el año de nacimiento del cliente: \"))\nhijos=int(input(\"Ingrese el número de hijos del cliente: \"))\npertenencia= int(input(\"Ingrese los años de pertenencia al banco del cliente: \"))\nestado_civil=input(\"Ingrese el estado civil del cliente (S para soltero, C para casado): \").upper()\nubicacion=input(\"Ingrese la ubicación del cliente (U para urbano, R para rural): \").upper()\n\nif (pertenencia>10 and hijos>= 2):\n print(\"APROBADO\")\nelif (estado_civil==\"C\" and hijos>3 and ano_nacimiento>=45 and ano_nacimiento<=55):\n print(\"APROBADO\")\nelif (ingreso>2500000 and estado_civil==\"S\" and ubicacion==\"U\"):\n print(\"APROBADO\")\nelif (ingreso>3500000 and pertenencia>5):\n print(\"APROBADO\")\nelif (ubicacion==\"R\" and estado_civil==\"C\" and hijos<2):\n print(\"APROBADO\")\nelse:\n print(\"RECHAZADO\") ","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej3/hito1_ej3_142e3b299bb4fea8426c307ffa59c772.py","file_name":"hito1_ej3_142e3b299bb4fea8426c307ffa59c772.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8643399678","text":"import numpy as np\nimport tensorflow as tf\n\nx_data = np.random.random((1000,13))\ny_data = np.random.random((1000,1))\n\n# 1. Define a helper that adds a network layer\ndef add_layer(input_data, in_size, out_size, activity_function=None):\n a = tf.Variable(tf.random_normal([in_size, out_size]))\n b = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n result = tf.matmul(input_data, a) + b\n if activity_function is None:\n answer = result\n else:\n answer = activity_function(result)\n return answer\n\n# 2. Define placeholder nodes that will receive the data\nxs = tf.placeholder(tf.float32, [None, 13])\nys = tf.placeholder(tf.float32, [None, 1])\n\n# 3. Define the neural network structure\nhidden_1 = add_layer(xs, 13, 10, activity_function=tf.nn.sigmoid)\nprediction = add_layer(hidden_1, 10, 1, activity_function=tf.nn.sigmoid)\n\n# 4. Define the loss expression\nloss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))\n\n# 5. Choose an optimizer to minimize the loss\ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n# 6. Initialize all variables\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n# 7. Train for 1000 iterations\nfor i in range(1000):\n sess.run(train, feed_dict={xs: x_data, ys: y_data})\n # if i%10 == 0:\n # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))\n\nprint(sess.run(prediction-y_data, feed_dict={xs: x_data, ys: y_data}))\n","repo_name":"xiaokanchaliang/DeepLearning","sub_path":"BPNN.py","file_name":"BPNN.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25811264937","text":"# https://leetcode.com/problems/the-kth-factor-of-n\n\n\nclass Solution:\n def kthFactor(self, n: int, k: int) -> int:\n j = 0\n for i in range(1, n + 1):\n if n % i == 0:\n j += 1\n if j == k:\n return i\n return 
-1\n","repo_name":"dandavison/misc-python","sub_path":"lc-the-kth-factor-of-n.py","file_name":"lc-the-kth-factor-of-n.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15195548419","text":"import argparse\nimport os\nimport subprocess as sp\nimport sys\nimport threading\nimport time\nfrom utils import concat_folder\nfrom tqdm import tqdm\n\nINTERPRETER=\"python\"\n\nFLAGS = [\n ('agent', [\n 'naive', 'optimal', 'sophisticated', 'myopic', 'underconfident',\n 'overconfident'\n ]),\n ('algorithm', [\n 'given_rewards', 'em_with_init', 'boltzmann_planner', 'vi_inference',\n 'optimal_planner', 'joint_with_init', 'em_without_init',\n 'joint_without_init'\n ]),\n]\n\nCONSTANT_FLAGS = [\n ('simple_mdp', False),\n ('imsize', 16),\n ('noise', 0.2),\n ('num_rewards', 7),\n ('num_human_trajectories', 8000),\n ('vin_regularizer_C', 1e-4),\n ('reward_regularizer_C', 0),\n ('lr', 0.01),\n ('reward_lr', 1.0),\n ('epochs', 20),\n ('reward_epochs', 50),\n ('k', 10),\n ('ch_h', 150),\n ('ch_p', 5),\n ('ch_q', 5),\n ('num_actions', 5),\n ('batchsize', 20),\n ('gamma', 0.95),\n ('num_iters', 50),\n ('max_delay', 10),\n ('hyperbolic_constant', 1.0),\n ('display_step', 1),\n ('log', False),\n ('verbosity', 1),\n ('plot_rewards', False),\n ('use_gpu', True),\n ('strict', False),\n]\n\ndef get_algorithm_specific_flags(flags):\n [alg] = [val for name, val in flags if name == 'algorithm']\n flag_names = ['em_iterations', 'num_simulated', 'num_with_rewards', 'num_validation', 'model']\n if alg == 'given_rewards':\n flag_values = [0, 0, 7000, 2000, 'VIN']\n elif alg in ['boltzmann_planner', 'optimal_planner']:\n flag_values = [0, 5000, 0, 2000, 'VIN']\n elif alg == 'em_with_init':\n flag_values = [2, 5000, 0, 2000, 'VIN']\n elif alg == 'joint_with_init':\n flag_values = [0, 5000, 0, 2000, 'VIN']\n elif alg == 'em_without_init':\n flag_values = [2, 0, 0, 0, 'VIN']\n elif alg == 'joint_without_init':\n flag_values = [0, 0, 0, 0, 'VIN']\n elif alg == 'vi_inference':\n flag_values = [0, 0, 0, 0, 'VI']\n else:\n raise ValueError('Unknown algorithm {}'.format(alg))\n\n return list(zip(flag_names, flag_values))\n\ndef get_agent_specific_flags(flags):\n [agent] = [val for name, val in flags if name == 'agent']\n if agent == 'overconfident':\n return [('calibration_factor', 5)]\n elif agent == 'underconfident':\n return [('calibration_factor', 0.5)]\n elif agent in ['optimal', 'naive', 'sophisticated', 'myopic']:\n return [('calibration_factor', 1)]\n else:\n raise ValueError('Unknown agent {}'.format(agent))\n\ndef get_beta_flag(flags):\n [agent] = [val for name, val in flags if name == 'agent']\n if agent in ['optimal', 'overconfident']:\n return ('beta', 0.1)\n elif agent in ['naive', 'sophisticated', 'myopic', 'underconfident']:\n return ('beta', 1.0)\n else:\n raise ValueError('Unknown agent {}'.format(agent))\n\ndef flag_generator(flags):\n \"\"\"Returns a generator that yields list of (flag, value) tuples.\"\"\"\n if not flags:\n yield []\n return\n\n flag_name, flag_values = flags[0]\n for value in flag_values:\n for sublst in flag_generator(flags[1:]):\n yield [(flag_name, value)] + sublst\n\n\ndef run_command(command, error_file, gpu_id, gpu_utilization, lock):\n command_str = ' '.join(command)\n try:\n with lock:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n proc = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)\n # Give it a few seconds to start up and read CUDA_VISIBLE_DEVICES\n time.sleep(5)\n out, 
err = proc.communicate()\n with lock:\n gpu_utilization[gpu_id] -= 1\n print('Ran command: {}'.format(command_str))\n print(out.decode('utf-8'))\n with open(error_file, 'a') as errtxt:\n errtxt.write(command_str + '\\n')\n errtxt.write(err.decode('utf-8'))\n return True\n except Exception as e:\n print(\"Failed to run: {} because of exception {}\".format(command_str, e))\n return False\n\nclass CommandRunner(object):\n def __init__(self, num_gpus):\n self.num_gpus = num_gpus\n self.gpu_utilization = {numeric_id:0 for numeric_id in range(num_gpus)}\n self.lock = threading.Lock()\n\n def find_gpu(self):\n with self.lock:\n gpu_id, utilization = min(self.gpu_utilization.items(), key=lambda x: x[1])\n if utilization >= 4:\n return None\n self.gpu_utilization[gpu_id] += 1\n return gpu_id\n\n def run_command_async(self, interpreter, flags, dest):\n base_command = [interpreter, 'train.py', '--output_folder={}'.format(dest)]\n flag_strs = ['--{}={}'.format(name, val) for name, val in flags]\n command = base_command + flag_strs\n error_file = concat_folder(dest, 'errors.log')\n gpu_id = self.find_gpu()\n while gpu_id is None:\n time.sleep(10)\n gpu_id = self.find_gpu()\n threading.Thread(\n target=run_command,\n args=(command, error_file, gpu_id, self.gpu_utilization, self.lock)).start()\n\n def is_done(self):\n return set(self.gpu_utilization.values()) == set([0])\n\n\ndef run_benchmarks(low, high, interpreter, flag_parameters, constant_flags, num_gpus, dest):\n \"\"\"\n :param interpreter: path to relevant python executable\n :param flags: dictionary of flags: [benchmark_values]\n high-low = # of trials run per config\n \"\"\"\n if not os.path.isdir(dest):\n os.mkdir(dest)\n\n runner = CommandRunner(num_gpus)\n for start in tqdm(range(low, high), desc='# trials'):\n seeds = range(10 * start, 10 * (start + 1))\n seed_flag = ('seeds', ','.join([str(seed) for seed in seeds]))\n for flags in tqdm(flag_generator(flag_parameters), desc='parameter combinations'):\n algorithm_flags = get_algorithm_specific_flags(flags)\n agent_flags = get_agent_specific_flags(flags)\n beta_flag = get_beta_flag(flags)\n all_flags = [seed_flag] + flags + constant_flags + algorithm_flags + agent_flags\n runner.run_command_async(interpreter, all_flags, dest)\n runner.run_command_async(interpreter, [beta_flag] + all_flags, dest)\n\n while not runner.is_done():\n time.sleep(30)\n\n # Delete the generated gridworld data, since it is quite large\n # for seed in range(10 * low, 10 * high):\n # sp.call('rm datasets/*-seed-{}-*.npz'.format(seed), shell=True)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--low', required=True)\n parser.add_argument('--high', required=True)\n parser.add_argument('-f', '--folder', required=True)\n parser.add_argument('-g', '--num_gpus', required=True)\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_args()\n low, high, num_gpus = map(int, (args.low, args.high, args.num_gpus))\n run_benchmarks(low, high, INTERPRETER, FLAGS, CONSTANT_FLAGS, num_gpus, args.folder)\n","repo_name":"HumanCompatibleAI/learning_biases","sub_path":"run_benchmarks.py","file_name":"run_benchmarks.py","file_ext":"py","file_size_in_byte":6977,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"77"} +{"seq_id":"36410712243","text":"# ===== Inicialização =====\n# ----- Importa e inicia pacotes\nimport pygame\nfrom assets import load_assets\nfrom classes import GalinhaInicio\nfrom constantes import TELA_INICIAL, ANIMACAO, FIM, FPS, 
BRANCO, PRETO\n\n\n#Define a função que carrega a tela inicial\ndef init_screen(window):\n #Define o primeiro estado da tela inicial\n state = TELA_INICIAL\n\n #Carrega os assets\n assets = load_assets()\n\n #Carrega a música da tela inicial\n pygame.mixer.music.load('assets/sons/background_inicial.wav')\n pygame.mixer.music.set_volume(0.4)\n\n #Transforma os assets em variaveis e define os textos que vão ser imprimidos na tela\n background_inicio = assets['background_inicio']\n img_galinha = pygame.transform.scale(assets['galinha_img'], (35, 35))\n font_titulo = assets['font_inicio_titulo']\n font_texto = assets['font_inicio_texto']\n nome_jogo = font_titulo.render('FUGA DA GALINHA', False, BRANCO)\n comando = font_texto.render('APERTE ENTER PARA COMEÇAR', False, PRETO)\n\n #Cria um objeto da galinha inicial e adiciona a um grupo\n all_sprites = pygame.sprite.Group()\n galinha = GalinhaInicio(img_galinha)\n all_sprites.add(galinha)\n\n #Variável para o ajuste de velocidade\n clock = pygame.time.Clock()\n\n #Loop inicial\n pygame.mixer.music.play(loops=-1)\n while state != FIM:\n # Ajusta a velocidade para o número de FPS\n clock.tick(FPS)\n\n # ----- Trata eventos\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return -1\n if state == TELA_INICIAL:\n if event.type == pygame.KEYDOWN:\n #Se o jogador apertar espaço inicia a animação da galinha\n if event.key == pygame.K_SPACE:\n pygame.mixer.music.stop()\n assets['som_start'].play()\n state = ANIMACAO\n\n\n # ----- Gera saídas\n\n #Preenche a tela com o background\n window.fill(PRETO)\n window.blit(background_inicio, (0, 0))\n if state == TELA_INICIAL:\n window.blit(nome_jogo, (90, 50))\n window.blit(comando, (58, 260))\n\n #Se a animação estiver ocorrendo atualiza a posição da sprite\n if state == ANIMACAO:\n all_sprites.update()\n\n #Se a galinha passou do fim da tela sai da tela inicial\n if galinha.y > 420:\n state = 1\n return state\n\n #Desenha a sprite na tela\n all_sprites.draw(window)\n\n # Mostra o novo frame para o jogador\n pygame.display.update()","repo_name":"lorenabc/pygame-fugadagalinha","sub_path":"carrega_tela_inicial.py","file_name":"carrega_tela_inicial.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26182896454","text":"# -*- coding: utf-8 -*-\n\nimport io\nimport os\nimport pprint\nimport logging as log\n\nfrom queue import Queue\n\npp = pprint.PrettyPrinter(indent=2)\n\n\nclass FileReader(object):\n def __init__(self):\n pass\n\n def readline(self, filename, batch_size, included_header: bool, callback, args=None):\n assert callback\n # default the header and extra args so the callback never sees unbound values\n header = None\n if args is None:\n args = ()\n\n with open(filename, 'rb') as fs:\n if included_header:\n header = fs.readline()\n\n bio = io.BytesIO()\n line_cnt = 0\n while True:\n line = fs.readline()\n if not line:\n break\n bio.write(line)\n line_cnt += 1\n if line_cnt % batch_size == 0:\n callback(bio.getvalue(), header, filename, *args)\n bio = io.BytesIO()\n\n if bio.getbuffer().nbytes > 0:\n callback(bio.getvalue(), header, filename, *args)\n\n return line_cnt, fs.tell()\n\n def readlines(self, filename, batch_size, mode, included_header: bool, callback):\n assert callback\n with open(filename, mode) as fs:\n if included_header:\n header = fs.readline()\n\n bio = io.BytesIO()\n line_cnt = 0\n for line in fs.readlines():\n bio.write(line)\n line_cnt += 1\n\n if line_cnt % batch_size == 0:\n callback(bio.getvalue(), filename)\n bio = io.BytesIO()\n\n if bio.getbuffer().nbytes > 0:\n 
callback(bio.getvalue(), filename)\n\n return line_cnt, fs.tell()\n\n\ndef file_copy(src_filename: str, dst_filename: str, line_count: int):\n with open(src_filename, \"rb\") as fs:\n lines = fs.readlines()\n\n with open(dst_filename, \"wb\") as fs:\n if line_count > 0:\n cnt = 0\n for line in lines:\n fs.write(line)\n cnt += 1\n if cnt >= line_count:\n return\n\n\nif __name__ == '__main__':\n src_filename = \"G:\\\\DWDB_RAW\\\\ODS\\\\PERIOD\\\\201901\\\\test\\\\OPT_BIS_MSG_BIT_BMS_INFO.31D.E7.001.csv\"\n src_dir = os.path.dirname(src_filename)\n src_basename = os.path.basename(src_filename).split('.')[0]\n\n file_copy(src_filename, os.path.join(src_dir, src_basename + '.test.csv'), 3)\n","repo_name":"etri-city-traffic-brain/traffic-data-mgmt","sub_path":"etri_data_collect_provider/online_and_realtime_loader-dj_etri/base/io/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"} +{"seq_id":"19148656744","text":"import sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nimport torch\nfrom torch.utils import data\n\nfrom Frontnet.DataProcessor import DataProcessor\nfrom Frontnet.ModelTrainer import ModelTrainer\nfrom Frontnet.Dataset import Dataset\nfrom Frontnet.Frontnet import FrontnetModel\nfrom Frontnet import Utils\n\n\ndef main():\n args = Utils.ParseArgs()\n\n trainset_path = args.load_trainset\n data_params = {'batch_size': args.batch_size, 'shuffle': True, 'num_workers': 6}\n w, h, c = args.model_params['w'], args.model_params['h'], args.model_params['c']\n\n out_folder = \"Results/{}x{}\".format(w, c)\n os.makedirs(out_folder, exist_ok=True)\n\n Utils.Logger(logfile=os.path.join(out_folder, \"FPTraining.log\"))\n\n torch.manual_seed(args.seed)\n\n # Load the training data (which will be split to validation and train)\n [x_train, x_validation, y_train, y_validation] = DataProcessor.ProcessTrainData(trainset_path)\n\n # Create the PyTorch data loaders\n training_set = Dataset(x_train, y_train, train=True)\n training_generator = data.DataLoader(training_set, **data_params)\n validation_set = Dataset(x_validation, y_validation, train=False)\n validation_generator = data.DataLoader(validation_set, **data_params)\n\n # Choose your model\n model = FrontnetModel(**args.model_params)\n\n # Run the training loop\n trainer = ModelTrainer(model)\n trainer.folderPath = out_folder\n trainer.Train(training_generator, validation_generator, args.save_model)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"idsia-robotics/pulp-frontnet","sub_path":"PyTorch/Scripts/FPTraining.py","file_name":"FPTraining.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"77"} +{"seq_id":"22429405006","text":"import emoji\nimport re\nimport pandas as pd\nimport numpy as np\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\nemoji_list = emoji.UNICODE_EMOJI.keys()\nFLAGS = re.MULTILINE | re.DOTALL\n\ndef preprocess(text):\n def re_sub(pattern, repl):\n return re.sub(pattern, repl, text, flags=FLAGS)\n \n def allcaps(text):\n text = text.group()\n return text.lower() + \" \"\n\n text = text.replace(\"#\",\" \")\n \n no_emoji = ''\n for char in text:\n if char not in emoji_list:\n no_emoji = no_emoji + char\n else:\n no_emoji = no_emoji + '<' + emoji.demojize(char) + '> '\n text = no_emoji\n \n text = 
re_sub(r\"@\\w+\",\"\")\n text = re_sub(r\"https?:\\/\\/\\S+\\b|www\\.(\\w+\\.)+\\S*\", \"\")\n text = re_sub(r\"/\",\" / \")\n text = re_sub(r\"([!?.]){2,}\", r\"\\1 \")\n text = re_sub(r\"\\b(\\S*?)(.)\\2{2,}\\b\", r\"\\1\\2 \")\n\n text = re_sub(r\"([A-Z]){2,}\", allcaps)\n \n punctuations = '''!()-[]{};:'\"\\,./?@#$%^&*_~0123456789'''\n \n no_punct = ''\n for char in text:\n if char not in punctuations:\n no_punct = no_punct + char\n text = no_punct\n text = text.replace(\" \",\" \")\n text = text.replace(\"\\n\",\" \")\n return text.lower()\n\ndef preprocessing(filename):#, max_length, vocab_size):\n data = pd.read_csv(filename)\n labels = list(data['Annotation'])\n texts = list(data['Tweets'])\n for i in range(0, len(texts)):\n texts[i] = preprocess(texts[i])\n return texts, labels\n# train_padded, validation_padded = tokenize(texts, .8, vocab_size, max_length)\n# training_label, validation_label = label_tokenize(labels, .8)\n# return train_padded, validation_padded, training_label, validation_label\n","repo_name":"sajal-1999/Hate_Speech_Detection","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"86292032564","text":"import pytest\nimport logging\n\nfrom wooqi.src.plugin_fixtures import test_config_parser, test_sequence_name, test_time\nfrom wooqi.src.plugin_fixtures import wooqi_conf\nfrom wooqi.src.pytest_hooks import pytest_collection_modifyitems, pytest_runtest_makereport\nfrom wooqi.src.pytest_hooks import pytest_report_header, pytest_generate_tests\nfrom wooqi.src.pytest_hooks import pytest_sessionfinish, pytest_unconfigure\nfrom wooqi.src import global_var\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef pytest_addoption(parser):\n \"\"\"\n Configuration of pytest parsing\n \"\"\"\n group = parser.getgroup('general')\n group.addoption(\n \"--seq-config\",\n default=None,\n action=\"store\",\n help=\"Test file config\"\n )\n group.addoption(\n \"--sn\",\n action=\"store\",\n default=\"test\",\n help=\"Sample serial number or name\"\n )\n group.addoption(\n \"--wooqi\",\n action=\"store_true\",\n dest='wooqi tag',\n help=\"wooqi tag to check if the test is runned thanks to wooqi\"\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef test_config(request, wooqi):\n \"\"\"\n Test config\n \"\"\"\n return request.config.getoption(\"--seq-config\")\n\n\n@pytest.fixture(scope=\"session\")\ndef serial_number(request, wooqi):\n \"\"\"\n Serial number\n \"\"\"\n return request.config.getoption(\"--sn\")\n\n\n@pytest.fixture(scope=\"session\")\ndef wooqi(request):\n \"\"\"\n Check if the test is runned with wooqi\n \"\"\"\n return request.config.getoption(\"--wooqi\")\n\n\n@pytest.fixture()\ndef test_name(request):\n \"\"\"\n Return current test name\n \"\"\"\n # test_name according in file_config\n test_name = str(request.node).split(\"'\")[1].split('[')[0]\n if test_name not in global_var['config'].file_config:\n call_number = str(request.node).split(\"'\")[1].split('[')[1].split('-')[0].replace(']', '')\n test_name = '{}-{}'.format(test_name, call_number)\n return test_name\n\n\n@pytest.fixture()\ndef test_info(request, test_name):\n \"\"\"\n Return current test info\n \"\"\"\n logger.debug(\"Get {} infos\".format(str(request.node).split(\"'\")[1]))\n uut = None\n uut2 = None\n var = None\n test_dico = {}\n test = str(request.node).split(\"'\")[1].split(\"[\")\n uuts = global_var['config'].uut(test_name)\n uuts2 = 
global_var['config'].uut2(test_name)\n if len(test) > 1:\n unit = test[1].replace(\"]\", \"\")\n if uuts is not None:\n for each in uuts:\n if each in unit:\n var = each\n try:\n uut = int(var)\n except Exception:\n uut = var\n test_dico[\"uuts\"] = uuts\n if uuts2 is not None:\n for each in uuts2:\n if each in unit:\n var = each\n try:\n uut2 = int(var)\n except Exception:\n uut2 = var\n\n test_dico[\"uuts2\"] = uuts2\n elif uuts is not None:\n test_dico[\"uuts\"] = uuts\n\n params = [\"time_test\", \"limit\", \"comparator\", \"misc_data\", \"nb_cycles\"]\n for param in global_var['config'].file_config[test_name]:\n if param in params:\n info = getattr(global_var['config'], param)(test_name, uut, uut2)\n if info is not None:\n test_dico[param] = info\n elif param != \"uut\" and param != \"uut2\":\n test_dico[param] = global_var['config'].file_config[test_name][param]\n logger.debug(test_dico)\n return test_dico\n","repo_name":"aldebaran/wooqi","sub_path":"wooqi/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"74744825847","text":"from Day_7_helpers import readInputFile\nfrom Day_7_Classes import Bag, Luggage\n\ndef main():\n handler = open(\"./input2.txt\")\n luggage = Luggage()\n allBags = readInputFile(handler, True) # {bagID:[subbag1, ...]}\n for rootBag, subBags in allBags.items():\n # We have a root bag, and potentially some children.\n # If the root bag doesn't exist, create it and add it to the luggage\n # If it already exists, then we're good\n # UNLESS! It contains our magic bag then we update.\n newRootBag = Bag(rootBag)\n if not luggage.contains(newRootBag.ID):\n luggage.addBag(newRootBag)\n else:\n newRootBag = luggage.getBag(rootBag)\n for subBag,count in subBags: # This is now a tuple\n # First, check if we've hit our magic bag\n if subBag == \"shiny gold\":\n luggage.getBag(rootBag).promote() #!\n # Check if it exists\n if luggage.contains(subBag):\n luggage.getBag(subBag).addParent(newRootBag)\n newRootBag.addChild((luggage.getBag(subBag),count))\n else:\n newBag = Bag(subBag)\n newBag.addParent(newRootBag)\n newRootBag.addChild((newBag,count))\n luggage.addBag(newBag)\n # That's all the same from part 1.\n # Now we start at the shiny bag and start counting how many bags it has\n print(luggage.getBag(\"shiny gold\").count()-1) # subtract 1 to not count the bag itself\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bijanagahi/advent-of-code-2020","sub_path":"Day_7/Day_7_2.py","file_name":"Day_7_2.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73416443130","text":"menu_options = ('1,2,3,4,5,6'.split(','))\ncontacts = {}\n\n\nclass Contact(object):\n # Constructor\n def __init__(self, first_name, last_name, phone_number):\n if len(first_name) < 2:\n raise ValueError(\"First name must be at least 2 characters long.\")\n\n if len(last_name) < 2:\n raise ValueError(\"Last name must be at least 2 characters long.\")\n\n if len(phone_number) < 5 or not phone_number.isdigit():\n raise ValueError(\"Phone number must contain only digits and have at least 5 numbers.\")\n\n self._first_name = first_name\n self._last_name = last_name\n self._phone_number = phone_number\n\n # Properties\n def first_name(self):\n return self._first_name\n\n def last_name(self):\n return self._last_name\n\n def phone_number(self):\n return 
self._phone_number\n\n def print_details(self):\n print(\"{} - {} - {}\".format(self._first_name, self._last_name, self._phone_number))\n\n\noption = 0\nwhile True:\n print(\"-\" * 31)\n option = input(\"Choose option:\" + '\\n'\n \"-------------------------------\" + '\\n'\n '1. Add contact' + '\\n'\n '2. Print Address Book' + '\\n'\n '3. Delete contact' + '\\n'\n '4. Modify contact' + '\\n'\n '5. Search contact' + '\\n'\n '6. Quit' + '\\n'\n \"-------------------------------\" '\\n>> ')\n if option == '6':\n break\n elif option == '1':\n print(\"You have choose option 1.\")\n elif option == '2':\n print(\"\")\n elif option == '3':\n print(\"\")\n elif option == '4':\n print(\"\")\n elif option == '5':\n print(\"\")\n else:\n print(\"Unknown choice!\")\n\n\n\n","repo_name":"lukaszziobro89/python","sub_path":"OOP_python/address_book.py","file_name":"address_book.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26709907741","text":"import espressopp\n\ndef writexyz(filename, system, velocities = True, unfolded = False, append = False):\n \"\"\"\n \"\"\"\n if append:\n file = open(filename,'a')\n else:\n file = open(filename,'w')\n numParticles = int(espressopp.analysis.NPart(system).compute())\n box_x = system.bc.boxL[0]\n box_y = system.bc.boxL[1]\n box_z = system.bc.boxL[2]\n st = \"%d\\n%15.10f %15.10f %15.10f\\n\" % (numParticles, box_x, box_y, box_z)\n file.write(st)\n maxParticleID = int(espressopp.analysis.MaxPID(system).compute())\n pid = 0\n while pid <= maxParticleID:\n if system.storage.particleExists(pid):\n particle = system.storage.getParticle(pid)\n if unfolded == False:\n xpos = particle.pos[0]\n ypos = particle.pos[1]\n zpos = particle.pos[2]\n else:\n unfoldedpos = system.bc.getUnfoldedPosition(particle.pos, particle.imageBox)\n xpos = unfoldedpos[0]\n ypos = unfoldedpos[1]\n zpos = unfoldedpos[2]\n xvel = particle.v[0]\n yvel = particle.v[1]\n zvel = particle.v[2]\n type = particle.type\n if velocities:\n st = \"%d %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f\\n\"%(pid, type, xpos, ypos, zpos, xvel, yvel, zvel)\n else:\n st = \"%d %d %15.10f %15.10f %15.10f\\n\"%(pid, type, xpos, ypos, zpos)\n file.write(st)\n pid += 1\n else:\n pid += 1\n\n file.close()\n\n\n\"\"\"\ndef fastwritexyz(filename, system, append = False):\n\n if append:\n file = open(filename,'a')\n else:\n file = open(filename,'w')\n\n configurations = espressopp.analysis.Configurations(system)\n configurations.gather()\n configuration = configurations[0]\n\n numParticles = configuration.size\n box_x = system.bc.boxL[0]\n box_y = system.bc.boxL[1]\n box_z = system.bc.boxL[2]\n st = \"%d\\n%15.10f %15.10f %15.10f\\n\" % (numParticles, box_x, box_y, box_z)\n file.write(st)\n\n for pid in configuration:\n xpos = configuration[pid][0]\n ypos = configuration[pid][1]\n zpos = configuration[pid][2]\n st = \"%d %15.10f %15.10f %15.10f\\n\"%(pid, xpos, ypos, zpos)\n file.write(st)\n\n file.close()\n\"\"\"\n\ndef readxyz(filename):\n \"\"\"\n \"\"\"\n file = open(filename)\n line = file.readline()\n num_particles = int(line.split()[0])\n line = file.readline().split()\n if len(line) == 3:\n Lx = float(line[0])\n Ly = float(line[1])\n Lz = float(line[2])\n else:\n Lx = float(line[0])\n Ly = float(line[4])\n Lz = float(line[8])\n\n pid = []\n type = []\n xpos = []\n ypos = []\n zpos = []\n xvel = []\n yvel = []\n zvel = []\n for i in range(num_particles):\n line = file.readline().split()\n if len(line) == 7 
or len(line)==4:\n line.insert(1,'0')\n pid.append(int(line[0]))\n type.append(int(line[1]))\n xpos.append(float(line[2]))\n ypos.append(float(line[3]))\n zpos.append(float(line[4]))\n if len(line) > 5:\n xvel.append(float(line[5]))\n yvel.append(float(line[6]))\n zvel.append(float(line[7]))\n else:\n xvel.append(0.0)\n yvel.append(0.0)\n zvel.append(0.0)\n return pid, type, xpos, ypos, zpos, xvel, yvel, zvel, Lx, Ly, Lz\n file.close()\n\n\n\ndef readxyzr(filename):\n \"\"\"\n \"\"\"\n\n file = open(filename)\n line = file.readline()\n num_particles = int(line.split()[0])\n line = file.readline()\n Lx = float(line.split()[0])\n Ly = float(line.split()[1])\n Lz = float(line.split()[2])\n pid = []\n type = []\n xpos = []\n ypos = []\n zpos = []\n xvel = []\n yvel = []\n zvel = []\n radius = []\n for i in range(num_particles):\n line = file.readline().split()\n if len(line) == 7:\n line.insert(1,'0')\n pid.append(int(line[0]))\n type.append(int(line[1]))\n xpos.append(float(line[2]))\n ypos.append(float(line[3]))\n zpos.append(float(line[4]))\n if len(line) == 6:\n radius.append(float(line[5]))\n else:\n radius.append(0.0)\n if len(line) > 5 and len(line) <= 8 and len(line) != 6:\n xvel.append(float(line[5]))\n yvel.append(float(line[6]))\n zvel.append(float(line[7]))\n else:\n xvel.append(0.0)\n yvel.append(0.0)\n zvel.append(0.0)\n if len(line) == 9:\n radius.append(float(line[8]))\n else:\n if len(line) != 6:\n radius.append(0.0)\n return pid, type, xpos, ypos, zpos, xvel, yvel, zvel, Lx, Ly, Lz, radius\n file.close()\n\n# Livia's modified writexyz to fastwritexyz with velocities\n\ndef fastwritexyz(filename, system, velocities = True, unfolded = True, append = False, scale=1.0):\n \"\"\"\n \"\"\"\n\n if append:\n file = open(filename,'a')\n else:\n file = open(filename,'w')\n\n configurations = espressopp.analysis.ConfigurationsExt(system)\n configurations.unfolded = unfolded\n configurations.gather()\n configuration = configurations[0]\n\n if velocities:\n velocities = espressopp.analysis.Velocities(system)\n velocities.gather()\n velocity = velocities[0]\n\n numParticles = int(espressopp.analysis.NPart(system).compute())\n box_x = system.bc.boxL[0]*scale\n box_y = system.bc.boxL[1]*scale\n box_z = system.bc.boxL[2]*scale\n st = \"%d\\n%15.10f %15.10f %15.10f\\n\" % (numParticles, box_x, box_y, box_z)\n file.write(st)\n\n for pid in configuration:\n xpos = configuration[pid][0]*scale\n ypos = configuration[pid][1]*scale\n zpos = configuration[pid][2]*scale\n if velocities:\n xvel = velocity[pid][0]*scale\n yvel = velocity[pid][1]*scale\n zvel = velocity[pid][2]*scale\n st = \"%d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f\\n\"%(pid, xpos, ypos, zpos, xvel, yvel, zvel)\n else:\n st = \"%d %15.10f %15.10f %15.10f\\n\"%(pid, xpos, ypos, zpos)\n file.write(st)\n #pid += 1\n\n file.close()\n\n# Franziska's modified readxyz to fastreadxyz without velocities\ndef fastreadxyz(filename):\n \"\"\"\n \"\"\"\n\n file = open(filename)\n line = file.readline()\n num_particles = int(line.split()[0])\n line = file.readline().split()\n if len(line) == 3:\n Lx = float(line[0])\n Ly = float(line[1])\n Lz = float(line[2])\n else:\n Lx = float(line[0])\n Ly = float(line[4])\n Lz = float(line[8])\n\n pid = []\n type = []\n xpos = []\n ypos = []\n zpos = []\n for i in range(num_particles):\n line = file.readline().split()\n if len(line) == 7 or len(line)==4:\n line.insert(1,'0')\n pid.append(int(line[0]))\n type.append(int(line[1]))\n xpos.append(float(line[2]))\n ypos.append(float(line[3]))\n 
zpos.append(float(line[4]))\n\n return pid, type, xpos, ypos, zpos, Lx, Ly, Lz\n file.close()\n\ndef fastwritexyz_standard(filename, system, unfolded = False, append = False):\n \"\"\"\n Fast write standard xyz file. Generally standard xyz file is\n\n >>> number of particles\n >>> comment line\n >>> type x y z\n >>> ......\n >>> ......\n >>> ......\n\n Additional information can be found here:\n Wiki: http://en.wikipedia.org/wiki/XYZ_file_format\n OpenBabel: http://openbabel.org/wiki/XYZ_%28format%29\n\n In this case one can choose folded or unfolded coordinates.\n Currently it writes only particle type = 0 and pid is a line number.\n Later different types should be implemented.\n \"\"\"\n\n if append:\n file = open(filename,'a')\n else:\n file = open(filename,'w')\n\n conf = espressopp.analysis.ConfigurationsExt(system)\n conf.unfolded = unfolded\n conf.gather()\n\n numParticles = int(espressopp.analysis.NPart(system).compute())\n box_x = system.bc.boxL[0]\n box_y = system.bc.boxL[1]\n box_z = system.bc.boxL[2]\n st = \"%d\\n%18.12f %18.12f %18.12f\\n\" % (numParticles, box_x, box_y, box_z)\n file.write(st)\n\n for pid in conf[0]:\n xpos = conf[0][pid][0]\n ypos = conf[0][pid][1]\n zpos = conf[0][pid][2]\n\n st = \"%d %15.10f %15.10f %15.10f\\n\"%(0, xpos, ypos, zpos)\n file.write(st)\n pid += 1\n\n file.close()\n\n\nimport espressopp\nfrom math import sqrt\nfrom espressopp import Real3D\n\ndef xyzfilewrite(filename, system, append=False, atomtypes={0:'Fe',1:'O',2:'C'}, velocities=False, charge=False):\n \"\"\"\n\n This method creates a xyz file with the data from a specific system:\n 1. row: number of the atoms\n 2. row: REMARK generated by ESPResSo++\n following rows: atomsymbol positionX positionY positionZ (velocityX velocityY velocityZ) (charge)\n last row: END\n\n The method needs the following parameters:\n\n * filename\n\n name of the file where the table schould be saved in\n\n * system\n\n ESPResSo system which creates the data e.g.:\n\n >>> system, integrator = espressopp.standard_system.LennardJones(100,(10,10,10))\n\n * append\n\n =False\n the data in the file will be overwritten\n\n =True\n the data will be appended\n\n * atomtypes\n the xyz file needs atom symbols, so it has to translate the numbers\n insert a dictionary with the right translation\n\n * velocities\n\n =False\n does not save the velocity vectors\n\n =True\n creates collumns for the velocity vectors and saves the data\n\n * charge\n\n =False\n does not save the charge\n\n =True\n creates collumns for the charges and saves the data\n \"\"\"\n if append:\n file = open(filename, 'a')\n else:\n file = open(filename,'w')\n maxParticleID = int(espressopp.analysis.MaxPID(system).compute())\n pid = 0\n comment = \"REMARK generated by ESPResSo++\"\n\n st = \"%d\\n%s\\n\"%(maxParticleID, comment)\n file.write(st)\n\n while pid <= maxParticleID:\n if system.storage.particleExists(pid):\n particle = system.storage.getParticle(pid)\n\n xpos = particle.pos[0]\n ypos = particle.pos[1]\n zpos = particle.pos[2]\n type = particle.type\n vx = particle.v[0]\n vy = particle.v[1]\n vz = particle.v[2]\n q = particle.q\n\n if type in atomtypes:\n atom = atomtypes[type]\n else:\n atom = 'XX'\n\n if velocities == True:\n if charge == True:\n st = \"%s %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f\\n\"%(atom, xpos, ypos, zpos, vx, vy, vz, q)\n else:\n st = \"%s %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f\\n\"%(atom, xpos, ypos, zpos, vx, vy, vz)\n else:\n if charge == True:\n st = \"%s %15.10f %15.10f %15.10f 
%15.10f\\n\"%(atom, xpos, ypos, zpos, q)\n else:\n st = \"%s %15.10f %15.10f %15.10f\\n\"%(atom, xpos, ypos, zpos)\n\n file.write(st)\n pid += 1\n else:\n pid += 1\n\n file.write('END\\n')\n file.close()\n","repo_name":"espressopp/espressopp","sub_path":"src/tools/DumpConfigurations.py","file_name":"DumpConfigurations.py","file_ext":"py","file_size_in_byte":11308,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"77"} +{"seq_id":"16920860547","text":"import numpy as np\nfrom numpy.linalg import norm\nfrom fym.core import BaseEnv, BaseSystem\nfrom fym.utils.rot import hat\n\n\ndef cross(x, y):\n return np.cross(x, y, axis=0)\n\n\nclass Multicopter(BaseEnv):\n g = 9.81\n m = 1.00\n r = 0.24\n J = np.diag([8.1, 8.1, 14.2]) * 1e-3\n Jinv = np.linalg.inv(J)\n b = 5.42e-5\n d = 1.1e-6\n Kf = np.diag([5.567, 5.567, 6.354]) * 1e-4\n Kt = np.diag([5.567, 5.567, 6.354]) * 1e-4\n rotorf_min = 0\n rotorf_max = 20\n e3 = np.vstack((0, 0, 1))\n nrotors = 4\n B = np.array([\n [1, 1, 1, 1],\n [0, -r, 0, r],\n [r, 0, -r, 0],\n [-d / b, d / b, -d / b, d / b],\n ])\n Lambda = np.eye(4)\n\n def __init__(self, pos, vel, R, omega):\n super().__init__()\n self.pos = BaseSystem(pos)\n self.vel = BaseSystem(vel)\n self.R = BaseSystem(R)\n self.omega = BaseSystem(omega)\n\n def deriv(self, pos, vel, R, omega, rotorfs):\n u = self.B @ rotorfs\n fT, M = u[:1], u[1:]\n\n dpos = vel\n dvel = (\n (self.m * self.g * self.e3 + R @ (-fT * self.e3) - self.Kf @ vel)\n / self.m\n )\n dR = R @ hat(omega)\n domega = self.Jinv @ (\n M - cross(omega, self.J @ omega) - norm(omega) * self.Kt @ omega\n )\n return dpos, dvel, dR, domega\n\n def set_dot(self, t, rotorfs_cmd):\n pos, vel, R, omega = self.observe_list()\n rotorfs = self.saturate(t, rotorfs_cmd)\n dots = self.deriv(pos, vel, R, omega, rotorfs)\n self.pos.dot, self.vel.dot, self.R.dot, self.omega.dot = dots\n return dict(rotorfs=rotorfs)\n\n def saturate(self, t, rotorfs_cmd):\n rotorfs = np.clip(rotorfs_cmd, self.rotorf_min, self.rotorf_max)\n return self.Lambda @ rotorfs\n\n\nclass Line(BaseEnv):\n u_min = -10.\n u_max = 10.\n def __init__(self, pos, vel):\n super().__init__()\n self.pos = BaseSystem(pos)\n self.vel = BaseSystem(vel)\n\n def deriv(self, pos, vel, u):\n dvel = u\n dpos = vel\n return dpos, dvel\n\n def set_dot(self, t, action):\n pos, vel = self.observe_list()\n u = self.saturate(action)\n dots = self.deriv(pos, vel, u)\n self.pos.dot, self.vel.dot = dots\n\n def saturate(self, action):\n u = np.clip(action, self.u_min, self.u_max)\n return u\n\n\nclass ThreeDOF(BaseEnv):\n V = 1\n u_min = -3\n u_max = 3\n def __init__(self, pos, yaw):\n super().__init__()\n self.pos = BaseSystem(pos)\n self.yaw = BaseSystem(yaw)\n\n def set_dot(self, t, u):\n yaw = self.yaw.state.squeeze()\n self.pos.dot = np.vstack((\n self.V * np.cos(yaw),\n self.V * np.sin(yaw)\n ))\n self.yaw.dot = u / self.V\n\n\nclass SecondOrder(BaseEnv):\n freq = 100\n damp = 0.707\n def __init__(self, u_min=None, u_max=None):\n super().__init__()\n self.x = BaseSystem(np.vstack([0]))\n self.xdot = BaseSystem(np.vstack([0]))\n self.u_min = u_min\n self.u_max = u_max\n\n def deriv(self, x, xdot, u):\n dx = xdot\n dxdot = -self.freq**2 * x - 2 * self.damp * self.freq * xdot \\\n + self.freq**2 * u\n return dx, dxdot\n\n def set_dot(self, t, u):\n x, xdot = self.observe_list()\n dots = self.deriv(x, xdot, u)\n self.x.dot, self.xdot.dot = dots\n\n def saturate(self, action):\n if self.u_min or self.u_max:\n u = np.clip(action, self.u_min, self.u_max)\n 
else:\n u = action\n return u\n","repo_name":"JungYT/lyapunov-rl","sub_path":"plant.py","file_name":"plant.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43892691735","text":"import json\r\nfrom cloudant.client import Cloudant\r\n\r\ndef update_user_training(username, part): #admin name\r\n try:\r\n\r\n with open(\"IBMcred.json\", 'r') as credentials:\r\n config_data = json.load(credentials)\r\n\r\n account_name = config_data['username']\r\n api_key = config_data['apikey']\r\n\r\n client = Cloudant.iam(account_name, api_key, connect=True)\r\n\r\n my_database = client['corporateapp']\r\n doc = my_database[f'Machine_Op:{username}']\r\n\r\n if 'trained_in' in doc:\r\n doc['trained_in'][f'{part}'] = f'admin_name' #{}\r\n\r\n # Update the document in Cloudant\r\n doc.save()\r\n\r\n print(\"Training approved successfully.\")\r\n else:\r\n print(\"The 'trained_in' dictionary does not exist in the document.\")\r\n\r\n client.disconnect()\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n\r\n#create_user_json('jenny','flap')\r\n\r\n#def approval_interface(username, part):\r\n # create a print or? on admin screen","repo_name":"LaurenG123/Corporate-App","sub_path":"update_train_user_json.py","file_name":"update_train_user_json.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"35359538295","text":"from networkx.classes import neighbors\nimport networkx as nx\nimport plotly.graph_objs as go\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\n\n\nApp = dash.Dash(__name__)\n \n\"\"\"\nRetourne le noeud qui correspond au coordonnées passé en\nparamètre dans la séquence. 
(On utilisera Pos)\n\"\"\"\ndef searchByPos(x,y,seq) :\n for elt in seq.keys() :\n if seq[elt][0] == x and seq[elt][1] == y :\n return elt\n return None\n\n\"\"\"\nAttribue à chque communauté une couleur selon son état.\n\"\"\"\ndef colored(seq,nb_nodes,hearth) :\n res = []\n for com in range(0,nb_nodes) :\n if com == hearth :\n text = 'rgb(0,255,0)'\n else : \n if seq[com]!=0 :\n text = 'rgb('+str(255*(1-seq[com]))+',0,0)'\n else :\n text = 'rgb(255,255,255)'\n res.append(text)\n return res\n \n\"\"\"\nModifie la variable Burned_nodes qui contient tous les noeuds brulés.\nA partir du foyer mis en paramètre.\nMet à jour le tableau Com_percent_size.\n\"\"\" \ndef update_fire(Graph,hearth) : \n Burned_nodes.add(hearth)\n new_burned_nodes = set()\n for burned_node in Burned_nodes :\n for neighbor in neighbors(Graph,burned_node) :\n if neighbor not in Burned_nodes :\n new_burned_nodes.add(neighbor)\n Burned_nodes.update(new_burned_nodes) \n #Update number of burned nodes by community\n Com_size_fire[Com_vector[hearth]] += 1\n for burned_node in new_burned_nodes :\n if burned_node not in Burning_sequence :\n Com_size_fire[Com_vector[burned_node]] +=1\n #Update percent of burned nodes by community \n for com in MetaGraph :\n Com_percent_fire[com] = Com_size_fire[com] / Com_size[com] \n \n\n\"\"\"\nInitialise le graphique, les variables globales et lance l'app.\n\"\"\" \ndef draw(graph,burning_sequence,com_vector,metagraph=None) :\n print(\"Computing display...\")\n #Global Variables\n global App \n\n global Graph\n Graph = graph.copy()\n \n global Com_vector\n Com_vector = com_vector\n \n #Metagraph setting is optionnal, will be initialize is not given\n global MetaGraph\n if metagraph is not None :\n MetaGraph = metagraph\n else:\n MetaGraph = nx.Graph()\n for edge in graph.edges() :\n com_1 = Com_vector[edge[0]]\n com_2 = Com_vector[edge[1]]\n if com_1 not in MetaGraph.nodes() :\n MetaGraph.add_node(com_1)\n if com_2 not in MetaGraph.nodes() :\n MetaGraph.add_node(com_2)\n if com_1 != com_2 :\n if not MetaGraph.has_edge(com_1,com_2) :\n MetaGraph.add_edge(com_1,com_2)\n \n #Associate Node to a coordinate\n global Pos\n Pos = nx.layout.spring_layout(MetaGraph)\n \n global Burning_sequence\n i = 0\n Burning_sequence = [0]*len(burning_sequence)\n for elt in burning_sequence :\n Burning_sequence[i] = elt\n i = i +1 \n \n global Burned_nodes\n Burned_nodes = set()\n \n global Index\n Index = 0\n \n #Associate community with its size\n global Com_size\n Com_size = [0]*MetaGraph.number_of_nodes() \n for i in range(0,Graph.number_of_nodes()) :\n Com_size[Com_vector[i]] += 1 \n \n #Associate community wiith its number of burned nodes\n global Com_size_fire\n Com_size_fire = [0]*MetaGraph.number_of_nodes() \n \n #Associate community with its percentage of burned nodes\n global Com_percent_fire\n Com_percent_fire = [0]*MetaGraph.number_of_nodes()\n \n #Draw the trace of edges\n edge_trace_x = [0]*(3*len(MetaGraph.edges()))\n edge_trace_y = [0]*(3*len(MetaGraph.edges()))\n index = 0\n for edge in MetaGraph.edges():\n x0 = Pos[edge[0]][0]\n y0 = Pos[edge[0]][1]\n x1 = Pos[edge[1]][0]\n y1 = Pos[edge[1]][1]\n edge_trace_x[index] = x0\n edge_trace_y[index] = y0\n index+=1\n edge_trace_x[index] = x1\n edge_trace_y[index] = y1\n index+=1\n edge_trace_x[index] = None\n edge_trace_y[index] = None\n index+=1 \n edge_trace = go.Scatter(\n x=edge_trace_x,\n y=edge_trace_y, \n line=dict(width=0.5,color='#888'),\n hoverinfo='none', \n mode=\"lines\")\n \n #Create the sequence which contains the color of nodes\n 
color_seq = [0]*MetaGraph.number_of_nodes()\n color_seq = colored(Com_percent_fire,MetaGraph.number_of_nodes(),Com_vector[Burning_sequence[Index]])\n \n #Draw the trace of node\n node_trace_x = []\n node_trace_y = []\n node_trace_text = [] \n for node in MetaGraph.nodes():\n x = Pos[node][0]\n y = Pos[node][1]\n node_trace_x.append(x)\n node_trace_y.append(y)\n node_trace_text.append(node) \n node_trace = go.Scatter(\n x=node_trace_x,\n y=node_trace_y,\n text=node_trace_text,\n mode='markers',\n hoverinfo='text',\n marker=dict(\n color=color_seq,\n size=30, \n line=dict(width=2)))\n \n #Draw the figure which contains edge_trace and node_trace \n global Fig\n Fig = go.Figure(data=[edge_trace, node_trace],\n layout=go.Layout(\n showlegend=False,\n xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),\n yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))\n Fig.update_layout(plot_bgcolor='rgb(72,70,92)',paper_bgcolor = 'rgb(72,70,92)')\n \n #Create the html page which will be use by Dash\n App.layout = html.Div([\n html.H1(\"MétaGraphe de \"+str(MetaGraph.number_of_nodes())+\" communautés\"),\n html.Div(dcc.Graph(id='Graph',figure=Fig)),\n html.Div(html.P('En vert la communauté du prochain foyer * ',className='anotation')),\n html.Div(id='resultat'),\n html.Button('Brûlons!',id='button'),\n html.Div(id='output') \n ]) \n App.run_server() \n\n\"\"\"\nAffiche les données de la communauté sur laquelle l'on passe sa souris.\n\"\"\"\n@App.callback(\n Output('output','children'),\n [Input('Graph','hoverData')])\ndef display_selected_data(hoverData):\n if hoverData is not None :\n x = hoverData['points'][0]['x']\n y = hoverData['points'][0]['y']\n com = searchByPos(x, y, Pos)\n text_com = 'Communauté : '+ str(com)\n text_size = ' Taille : ' + str(Com_size[com])\n text_size_fire = ' Taille du Feu : ' + str(Com_size_fire[com])\n text_percent_fire = ' Pourcentage de Brûlure : ' + str(round(Com_percent_fire[com]*100,2)) + '%'\n return [html.P(text_com),html.P(text_size),html.P(text_size_fire),html.P(text_percent_fire)]\n else :\n return [html.P(\"\")] \n\"\"\" \nRépends le feu à travers le graphe et met à jour la couleur des noeuds. 
\n\"\"\"\n@App.callback( \n Output('Graph','figure'),\n [Input('button','n_clicks')])\ndef update_graph(n_clicks):\n #specify with use global Index\n global Index \n if Index != len(Burning_sequence):\n #At the load of the page n_clicks = None\n if n_clicks is not None :\n update_fire(Graph,Burning_sequence[Index])\n Index += 1\n if Index != len(Burning_sequence) : \n color_seq = colored(Com_percent_fire,MetaGraph.number_of_nodes(),Com_vector[Burning_sequence[Index]])\n Fig.update_traces(\n marker=dict(\n color=color_seq,\n size=30, \n line=dict(width=2)))\n #Trigger on last Turn, Erase the hearth point color\n if Index == len(Burning_sequence) :\n color_seq = colored(Com_percent_fire,MetaGraph.number_of_nodes(),-1)\n Fig.update_traces(\n marker=dict(\n color=color_seq,\n size=30, \n line=dict(width=2))) \n return Fig\n \n\"\"\" \nAffiche le Burning number quand le feu s'est entièrement répandu\n\"\"\"\n@App.callback(\n Output('resultat','children'),\n [Input('button','n_clicks')])\ndef display_results(n_clicks) :\n if Index == len(Burning_sequence) :\n burning_number = 'Burning Number : ' + str(len(Burning_sequence))\n return [html.P(burning_number,className='burning')]\n else :\n return [html.P(\"\")]","repo_name":"BerruetLilian/Burning-Graph","sub_path":"src/visual_meta.py","file_name":"visual_meta.py","file_ext":"py","file_size_in_byte":8488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8995039173","text":"#importacion de librerias a utilizar\nimport streamlit as st\nfrom streamlit_option_menu import option_menu\n#importacion de paginas externas\nimport paginas.home,paginas.cosmicos,paginas.neutrones,paginas.cherenkov ,paginas.kamiokande,paginas.machine,paginas.desarrollo# noqa: E401\n\nst.set_page_config(\n page_title=\"Machine Learning para deteccion de Neutrino\"\n)\n\nclass MultiApp:\n def __init__(self):\n self.apps=[]\n def add_app(self,title,function):\n self.apps.append({\n \"title\":title,\n \"function\":function\n })\n def run():\n with st.sidebar:\n app=option_menu(\n menu_title=\"Contenido\",\n options=[\"Inicio\",\"Radiacion Cosmica\",\"Neutrones\",\"Detector Cherenkov\",\"Super-Kamiokande\",\"Machine Learning\",\"Desarrollo\"], # noqa: E501\n default_index=0,\n styles={\n \"container\":{\"padding\":\"5!important\",\"background-color\":'black'},\n \"icon\":{\n \"color\":\"white\",\"font-size\":\"23px\"\n },\n \"nav-link\":{\n \"color\":\"white\",\"font-size\":\"20px\",\"text-aling\":\"left\",\"margin\":\"0px\",\"--hover-color\":\"blue\"\n },\n \"nav-link-selected\":{\"background-color\":\"#2F6CA6\"},\n }\n )\n if app==\"Inicio\":\n paginas.home.app()\n if app==\"Radiacion Cosmica\":\n paginas.cosmicos.app()\n if app==\"Neutrones\":\n paginas.neutrones.app()\n if app==\"Detector Cherenkov\":\n paginas.cherenkov.app()\n if app==\"Super-Kamiokande\":\n paginas.kamiokande.app()\n if app==\"Machine Learning\":\n paginas.machine.app()\n if app==\"Desarrollo\":\n paginas.desarrollo.app()\n run()","repo_name":"urieljc/ExpoFisicaNuclear","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20893482212","text":"from utils import parser\nfrom time import struct_time\nfrom datetime import datetime\nfrom argparse import ArgumentParser\n\nimport pytest\n\n\n@pytest.mark.parametrize(\"time_input,expected_time\", [\n ('00:00', (0, 0)),\n ('01:10', (1, 10)),\n ('20:17', (20, 17)),\n 
('23:59', (23, 59)),\n])\ndef test_to_time_with_valid_input(time_input, expected_time):\n expected_hour, expected_minute = expected_time\n time_output = parser.to_time(time_input)\n assert isinstance(time_output, struct_time)\n assert time_output.tm_hour == expected_hour\n assert time_output.tm_min == expected_minute\n\n\n@pytest.mark.parametrize(\"time_input\", [\n '00:70',\n '25:10',\n '20;17',\n 'AAAA',\n])\ndef test_to_time_with_invalid_input(time_input):\n with pytest.raises(ValueError, match=f\"{time_input} .*\") as excinfo:\n parser.to_time(time_input)\n assert 'InputValidationError' == excinfo.typename\n\n\n@pytest.mark.parametrize(\"date_input,expected_date\", [\n ('2020-08-17', (2020, 8, 17)),\n ('1999-01-30', (1999, 1, 30)),\n ('2050-06-28', (2050, 6, 28)),\n ('2001-09-12', (2001, 9, 12)),\n])\ndef test_to_date_with_valid_input(date_input, expected_date):\n expected_year, expected_month, expected_day = expected_date\n date_output = parser.to_date(date_input)\n assert isinstance(date_output, datetime)\n assert date_output.year == expected_year\n assert date_output.month == expected_month\n assert date_output.day == expected_day\n\n\n@pytest.mark.parametrize(\"date_input\", [\n '00:70',\n '4040-44-20',\n '2020-02-31',\n '2000_09_08',\n])\ndef test_to_date_with_invalid_input(date_input):\n with pytest.raises(ValueError, match=f\"{date_input} .*\") as excinfo:\n parser.to_date(date_input)\n assert 'InputValidationError' == excinfo.typename\n\n\ndef test_configure_argument_parser():\n cmd_parser = parser.configure_argument_parser()\n assert isinstance(cmd_parser, ArgumentParser)\n","repo_name":"Wason1797/RoadRestrictionChecker","sub_path":"test/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"27870024111","text":"import tensorflow as tf\nimport os\nfrom pre_processing import get_data\n\nMAX_DOCUMENT_LENGTH = 300 # length of word vector consisting of word IDs\nSAVE_DIR = '../data'\n\ndata_dir = os.path.join(\"..\", \"data\")\n\nfor i in range(1, 4):\n x_train, x_test, y_train, y_test = get_data(data_dir, i, preprocess=True)\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(\n MAX_DOCUMENT_LENGTH)\n vocab_processor.fit(x_train)\n vocab_processor.save(os.path.join(SAVE_DIR, \"vocab_question_{}\".format(i)))\n","repo_name":"gongjoonamu/machine-learning","sub_path":"projects/ae_onboarding_classifier/utils/write_vocab_files.py","file_name":"write_vocab_files.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11199892363","text":"import math\nimport streamlit as st\nfrom os import remove\nfrom skimage.io import imread\nfrom sklearn.metrics import mean_squared_error\nfrom copy import copy\nfrom decimal import Decimal\nimport numpy as np\n\nfrom main import Transform\n\n\ndef show(fodw, fimage):\n st.image(fodw)\n al = st.slider(\"Zwiększenie czerni\", 0.0, 1.0, 0.38, 0.01)\n bt = st.slider(\"Zwiększenie bieli\", 0.0, 1.0 - al, 0.0, 0.01)\n fnodw = tomograf.norm(copy(fodw), al, bt)\n # print(nodw)\n st.image(fnodw)\n # rmse = math.sqrt(mean_squared_error(fimage, fnodw))\n rmse = math.sqrt(((tomograf.cut_all(fimage) - fnodw)**2).mean())\n # for i, v in enumerate(fimage):\n # for j, k in enumerate(v):\n # if k > 1 or k < -1:\n # print(k)\n st.write(\"RMSE: \" + str(rmse*100) + \"E-2\")\n\n\nst.write(\"\"\"\n# 
Tomograf\n\"\"\")\n\nimage_bytes = st.file_uploader(\"Choose a image\", type=\"jpg\")\n\nif image_bytes is not None:\n tomograf = Transform()\n m = 1.0\n # tomograf.free_all()\n with open(\"tem.jpg\", 'wb') as f:\n f.write(image_bytes.read())\n image = imread(\"tem.jpg\", True)\n remove(\"tem.jpg\")\n st.image(image)\n\n decoders = st.slider(\"Liczba dekoderów\", 50, 1000, 800, 10)\n rang = st.slider(\"Rozpiętość (w stopniach)\", 100, 330, 250, 1)\n step = st.slider(\"Krok (w stopniach)\", 0.1, 5.0, 0.5, 0.1)\n\n if not st.checkbox(\"Przetwarzanie iteracyjne\"):\n\n if st.checkbox(\"Włącz tomograf\"):\n sinogram = tomograf.call_transform(image, decoders, rang * math.pi / 180, step * math.pi / 180)\n tomograf.free_bitmap()\n st.image(sinogram)\n fil = st.checkbox(\"Filtruj sinogram\")\n if st.checkbox(\"Odwrotna transformacja\"):\n odw = tomograf.reverse_transform(decoders, rang * math.pi / 180, step * math.pi / 180,\n image.shape, fil)\n show(odw, image)\n\n else:\n beg_s, end_s = st.slider(\"Obrót od, do\", 0, 360, (90, 180), 1)\n print(beg_s, end_s)\n if st.checkbox(\"Włącz tomograf\"):\n sinogram = tomograf.call_transform(image, decoders, rang * math.pi / 180, step * math.pi / 180)\n tomograf.free_bitmap()\n cutted = sinogram[int(beg_s / 360 * sinogram.shape[0]):int(end_s / 360 * sinogram.shape[0]), ]\n st.image(cutted)\n fil = st.checkbox(\"Filtruj sinogram\")\n if st.checkbox(\"Odwrotna transformacja\"):\n odw = tomograf.reverse_transform_it(decoders, rang * math.pi / 180, step * math.pi / 180,\n image.shape, fil, beg_s, end_s)\n show(odw, image)\n","repo_name":"er713/IwM-Tomograf","sub_path":"Python/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"12735126471","text":"import math\nimport unittest\nimport random\n\n# functions for wallis pi construction\n\ndef wallis(nIter):\n\tprod=1;\n\tfor i in range(1,nIter+1):\n\t\texp=(4*i*i)/((4*i*i)-1)\n\t\tprod*=exp;\n\n\tprod*=2\n\treturn prod\n\n\n# function to calculate distance from origin\n# check if its <=(0.5)(inside circle)\ndef distanceChk(x,y):\n\n\td=((x-0.5)**2+(y-0.5)**2)**0.5\n\tif(d<=0.5):\n\t\treturn True\n\telse:\n\t\treturn False\n\n# function to calculate pi using monte carlo\ndef monte_carlo(nThrows):\n\n\tcountCircle=0\n\tcountSquare=0\n\tfor _ in range(nThrows):\n\t\tx=random.random()\n\t\ty=random.random()\n\n\t\tif(distanceChk(x,y)):\n\t\t\tcountCircle+=1\n\t\t\n\t\tcountSquare+=1\n\n\tpiVal=4*(countCircle/countSquare)\n\treturn piVal\n\n\nclass TestWallis(unittest.TestCase):\n def test_low_iters(self):\n for i in range(0, 5):\n pi = wallis(i)\n self.assertTrue(abs(pi - math.pi) > 0.15, msg=f\"Estimate with just {i} iterations is {pi} which is too accurate.\\n\")\n \n def test_high_iters(self):\n for i in range(500, 600):\n pi = wallis(i)\n self.assertTrue(abs(pi - math.pi) < 0.01, msg=f\"Estimate with even {i} iterations is {pi} which is not accurate enough.\\n\")\n\n\nclass TestMC(unittest.TestCase):\n def test_randomness(self):\n pi0 = monte_carlo(15000)\n pi1 = monte_carlo(15000)\n \n self.assertNotEqual(pi0, pi1, \"Two different estimates for PI are exactly the same. This is almost impossible.\")\n\n self.assertFalse(abs(pi0 - pi1) > 0.05, \"Two different estimates of PI are too different. 
This should not happen\")\n\n def test_accuracy(self):\n for i in range(500, 600):\n pi = monte_carlo(i)\n self.assertTrue(abs(pi - math.pi) < 0.4, msg=f\"Estimate with even {i} iterations is {pi} which is not accurate enough.\\n\")\n \n \nif __name__ == \"__main__\":\n unittest.main()\n\n\n","repo_name":"Genskill2/02-bootcamp-estimate-pi-AI-Factor-y","sub_path":"estimate.py","file_name":"estimate.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"31825279104","text":"# vim: set fileencoding=utf-8\nimport re\nfrom typing import Any, Dict\nfrom flask import Blueprint, request, Response, url_for, abort\n\nfrom bemani.common import GameConstants\nfrom bemani.data import UserID\nfrom bemani.frontend.app import loginrequired, jsonify, render_react\nfrom bemani.frontend.bishi.bishi import BishiBashiFrontend\nfrom bemani.frontend.templates import templates_location\nfrom bemani.frontend.static import static_location\nfrom bemani.frontend.types import g\n\n\nbishi_pages = Blueprint(\n \"bishi_pages\",\n __name__,\n url_prefix=f\"/{GameConstants.BISHI_BASHI.value}\",\n template_folder=templates_location,\n static_folder=static_location,\n)\n\n\n@bishi_pages.route(\"/players\")\n@loginrequired\ndef viewplayers() -> Response:\n frontend = BishiBashiFrontend(g.data, g.config, g.cache)\n return render_react(\n \"All BishiBashi Players\",\n \"bishi/allplayers.react.js\",\n {\"players\": frontend.get_all_players()},\n {\n \"refresh\": url_for(\"bishi_pages.listplayers\"),\n \"player\": url_for(\"bishi_pages.viewplayer\", userid=-1),\n },\n )\n\n\n@bishi_pages.route(\"/players/list\")\n@jsonify\n@loginrequired\ndef listplayers() -> Dict[str, Any]:\n frontend = BishiBashiFrontend(g.data, g.config, g.cache)\n return {\n \"players\": frontend.get_all_players(),\n }\n\n\n@bishi_pages.route(\"/players/\")\n@loginrequired\ndef viewplayer(userid: UserID) -> Response:\n frontend = BishiBashiFrontend(g.data, g.config, g.cache)\n djinfo = frontend.get_all_player_info([userid])[userid]\n if not djinfo:\n abort(404)\n latest_version = sorted(djinfo.keys(), reverse=True)[0]\n\n return render_react(\n f'{djinfo[latest_version][\"name\"]}\\'s BishiBashi Profile',\n \"bishi/player.react.js\",\n {\n \"playerid\": userid,\n \"own_profile\": userid == g.userID,\n \"player\": djinfo,\n \"versions\": {\n version: name for (game, version, name) in frontend.all_games()\n },\n },\n {\n \"refresh\": url_for(\"bishi_pages.listplayer\", userid=userid),\n },\n )\n\n\n@bishi_pages.route(\"/players//list\")\n@jsonify\n@loginrequired\ndef listplayer(userid: UserID) -> Dict[str, Any]:\n frontend = BishiBashiFrontend(g.data, g.config, g.cache)\n djinfo = frontend.get_all_player_info([userid])[userid]\n\n return {\n \"player\": djinfo,\n }\n\n\n@bishi_pages.route(\"/options\")\n@loginrequired\ndef viewsettings() -> Response:\n frontend = BishiBashiFrontend(g.data, g.config, g.cache)\n userid = g.userID\n djinfo = frontend.get_all_player_info([userid])[userid]\n if not djinfo:\n abort(404)\n\n return render_react(\n \"BishiBashi Game Settings\",\n \"bishi/settings.react.js\",\n {\n \"player\": djinfo,\n \"versions\": {\n version: name for (game, version, name) in frontend.all_games()\n },\n },\n {\n \"updatename\": url_for(\"bishi_pages.updatename\"),\n },\n )\n\n\n@bishi_pages.route(\"/options/name/update\", methods=[\"POST\"])\n@jsonify\n@loginrequired\ndef updatename() -> Dict[str, Any]:\n frontend = BishiBashiFrontend(g.data, 
g.config, g.cache)\n version = int(request.get_json()[\"version\"])\n name = request.get_json()[\"name\"]\n user = g.data.local.user.get_user(g.userID)\n if user is None:\n raise Exception(\"Unable to find user to update!\")\n\n # Grab profile and update dj name\n profile = g.data.local.user.get_profile(GameConstants.BISHI_BASHI, version, user.id)\n if profile is None:\n raise Exception(\"Unable to find profile to update!\")\n if len(name) == 0 or len(name) > 6:\n raise Exception(\"Invalid profile name!\")\n\n # Convert lowercase to uppercase. We allow lowercase widetext in\n # the JS frontend to allow for Windows IME input of hiragana/katakana.\n def conv(char: str) -> str:\n i = ord(char)\n if i >= 0xFF41 and i <= 0xFF5A:\n return chr(i - (0xFF41 - 0xFF21))\n else:\n return char\n\n name = \"\".join([conv(a) for a in name])\n\n if (\n re.match(\n \"^[\"\n + \"\\uFF20-\\uFF3A\"\n + \"\\uFF10-\\uFF19\" # widetext A-Z, @\n + \"\\u3041-\\u308D\\u308F\\u3092\\u3093\" # widetext 0-9\n + \"\\u30A1-\\u30ED\\u30EF\\u30F2\\u30F3\\u30FC\" # hiragana\n + \"\\u3000\" # katakana\n + \"\\u301C\" # widetext blank space\n + \"\\u30FB\" # widetext ~\n + \"\\u30FC\" # widetext middot\n + \"\\u2212\" # widetext long dash\n + \"\\u2605\" # widetext short dash\n + \"\\uFF01\" # widetext heavy star\n + \"\\uFF03\" # widetext !\n + \"\\uFF04\" # widetext #\n + \"\\uFF05\" # widetext $\n + \"\\uFF06\" # widetext %\n + \"\\uFF08\" # widetext &\n + \"\\uFF09\" # widetext (\n + \"\\uFF0A\" # widetext )\n + \"\\uFF0B\" # widetext *\n + \"\\uFF0F\" # widetext +\n + \"\\uFF1C\" # widetext /\n + \"\\uFF1D\" # widetext <\n + \"\\uFF1E\" # widetext =\n + \"\\uFF1F\" # widetext >\n + \"\\uFFE5\" # widetext ?\n + \"]*$\", # widetext Yen symbol\n name,\n )\n is None\n ):\n raise Exception(\"Invalid profile name!\")\n profile = frontend.update_name(profile, name)\n g.data.local.user.put_profile(GameConstants.BISHI_BASHI, version, user.id, profile)\n\n # Return that we updated\n return {\n \"version\": version,\n \"name\": frontend.sanitize_name(name),\n }\n","repo_name":"DragonMinded/bemaniutils","sub_path":"bemani/frontend/bishi/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":185,"dataset":"github-code","pt":"77"} +{"seq_id":"29343385579","text":"#Cálculo del dígito verificador de un rut\ndef contar_caracteres(cadena):\n contador = 0\n while cadena[contador:]:\n contador += 1\n return contador\nrut = input(\"Ingrese su rut sin puntos y sin el digito verificador:\")\ndigitos = (len(rut))\nif digitos == 8:\n num8 = eval(rut[0])\n num7 = eval(rut[1])\n num6 = eval(rut[2])\n num5 = eval(rut[3])\n num4 = eval(rut[4])\n num3 = eval(rut[5])\n num2 = eval(rut[6])\n num1 = eval(rut[7])\n suma = (num1*2)+(num2*3)+(num3*4)+(num4*5)+(num5*6)+(num6*7)+(num7*2)+(num8*3)\n resto = (suma % 11)\n resta = (11 - resto)\n if (resta == 11):\n print(\"dv=0\")\n if (resta == 10):\n print(\"dv=k\")\n if (resta < 10):\n print(\"dv=\",resta)\nif digitos == 7:\n num7 = eval(rut[0])\n num6 = eval(rut[1])\n num5 = eval(rut[2])\n num4 = eval(rut[3])\n num3 = eval(rut[4])\n num2 = eval(rut[5])\n num1 = eval(rut[6])\n suma = (num1*2)+(num2*3)+(num3*4)+(num4*5)+(num5*6)+(num6*7)+(num7*2)\n resto = (suma % 11)\n resta = (11 - resto)\n if (resta == 11):\n print(\"dv=0\")\n if (resta == 10):\n print(\"dv=k\")\n if (resta < 10):\n 
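# results below 10 map directly to the check digit in the mod-11 scheme\n        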
print(\"dv=\",resta)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej5/hito1_ej5_658ae59301d460812b26b26189c3c65c.py","file_name":"hito1_ej5_658ae59301d460812b26b26189c3c65c.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40361827329","text":"# -*- coding: utf-8 -*-\nfrom sys import exit\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom capture_core import *\n# 使用matplotlib绘制柱状图\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nfrom monitor_system import start_monitor\nfrom forged_packet import startForged\nfrom multiprocessing import Process\n\n\nclass Ui_MainWindow(QMainWindow):\n\n core = None\n timer = None\n Monitor = None\n Forged = None\n\n def setupUi(self):\n self.setWindowTitle(\"WireWhale\")\n self.resize(950, 580)\n\n #设置程序图标\n icon = QIcon()\n icon.addPixmap(QPixmap(\"img/shark.jpg\"), QIcon.Normal, QIcon.Off)\n self.setWindowIcon(icon)\n self.setIconSize(QSize(20, 20))\n #中间布局,设为透明\n self.centralWidget = QWidget(self)\n self.centralWidget.setStyleSheet(\"background:transparent;\")\n\n #栅栏布局,使得窗口自适应\n self.gridLayout = QGridLayout(self.centralWidget)\n self.gridLayout.setContentsMargins(0, 0, 0, 0)\n self.gridLayout.setSpacing(6)\n\n #顶部控件布局\n self.horizontalLayout = QHBoxLayout()\n self.horizontalLayout.setContentsMargins(10, 2, 10, 1)\n self.horizontalLayout.setSpacing(20)\n\n #三个显示区布局\n self.verticalLayout = QVBoxLayout()\n self.verticalLayout.setContentsMargins(10, 0, 3, 10)\n self.verticalLayout.setSpacing(6)\n\n # 初始主窗口字体\n font = QFont()\n with open('data.json', 'r') as file_obj:\n '''读取json文件'''\n old_font = json.load(file_obj) # 返回列表数据,也支持字典\n if old_font[\"font\"]:\n font.setFamily(old_font[\"font\"])\n font.setPointSize(int(old_font[\"size\"]))\n else:\n if platform == 'Windows':\n font.setFamily(\"Lucida Sans Typewriter\")\n old_font[\"font\"] = \"Lucida Sans Typewriter\"\n if platform == \"Linux\":\n font.setFamily(\"Noto Mono\")\n old_font[\"font\"] = \"Noto Mono\"\n font.setPointSize(11)\n with open('data.json', 'w') as file_obj:\n '''写入json文件'''\n json.dump(old_font, file_obj)\n\n #数据包显示框\n self.info_tree = QTreeWidget(self.centralWidget)\n self.info_tree.setFrameStyle(QFrame.Box | QFrame.Plain)\n self.info_tree.setAutoScroll(True)\n self.info_tree.setRootIsDecorated(False)\n self.info_tree.setFont(font)\n self.info_tree.setColumnCount(7) #设置表格为7列\n #固定行高,取消每次刷新所有行,避免更新数据时不流畅\n self.info_tree.setUniformRowHeights(True)\n #设置表头\n self.info_tree.headerItem().setText(0, \"No.\")\n self.info_tree.headerItem().setText(1, \"Time\")\n self.info_tree.headerItem().setText(2, \"Source\")\n self.info_tree.headerItem().setText(3, \"Destination\")\n self.info_tree.headerItem().setText(4, \"Protocol\")\n self.info_tree.headerItem().setText(5, \"Length\")\n self.info_tree.headerItem().setText(6, \"Info\")\n self.info_tree.setStyleSheet(\"background:transparent;\")\n self.info_tree.setSortingEnabled(True)\n self.info_tree.sortItems(0, Qt.AscendingOrder)\n self.info_tree.setColumnWidth(0, 75)\n self.info_tree.setColumnWidth(1, 130)\n self.info_tree.setColumnWidth(2, 150)\n self.info_tree.setColumnWidth(3, 150)\n self.info_tree.setColumnWidth(4, 85)\n self.info_tree.setColumnWidth(5, 60)\n for i in range(7):\n self.info_tree.headerItem().setBackground(i,\n QBrush(QColor(Qt.white)))\n self.info_tree.setSelectionBehavior(\n QTreeWidget.SelectRows) #设置选中时为整行选中\n 
self.info_tree.setSelectionMode(QTreeWidget.SingleSelection) #设置只能选中一行\n \"\"\"显示排序图标\"\"\"\n self.info_tree.header().setSortIndicatorShown(True)\n self.info_tree.clicked.connect(self.on_tableview_clicked)\n\n #数据包详细内容显示框\n self.treeWidget = QTreeWidget(self.centralWidget)\n self.treeWidget.setAutoScroll(True)\n self.treeWidget.setTextElideMode(Qt.ElideMiddle)\n self.treeWidget.header().setStretchLastSection(True)\n self.treeWidget.setStyleSheet(\"background:transparent; color:white;\")\n self.treeWidget.header().hide()\n self.treeWidget.setFont(font)\n # 设为只有一列\n self.treeWidget.setColumnCount(1)\n self.treeWidget.setFrameStyle(QFrame.Box | QFrame.Plain)\n\n #hex显示区域\n self.hexBrowser = QTextBrowser(self.centralWidget)\n self.hexBrowser.setText(\"\")\n self.hexBrowser.setFont(font)\n self.hexBrowser.setStyleSheet(\"background:transparent; color:white;\")\n self.hexBrowser.setFrameStyle(QFrame.Box | QFrame.Plain)\n\n # 允许用户通过拖动三个显示框的边界来控制子组件的大小\n self.splitter = QSplitter(Qt.Vertical)\n self.splitter.addWidget(self.info_tree)\n self.splitter.addWidget(self.treeWidget)\n self.splitter.addWidget(self.hexBrowser)\n self.verticalLayout.addWidget(self.splitter)\n\n self.gridLayout.addLayout(self.verticalLayout, 1, 0, 1, 1)\n\n #过滤器输入框\n self.Filter = QLineEdit(self.centralWidget)\n self.Filter.setPlaceholderText(\"Apply a capture filter … \")\n self.Filter.setStyleSheet(\"background:white\")\n self.Filter.setFont(font)\n self.horizontalLayout.addWidget(self.Filter)\n\n #过滤器按钮\n self.FilterButton = QPushButton(self.centralWidget)\n self.FilterButton.setText(\"开始\")\n icon1 = QIcon()\n icon1.addPixmap(QPixmap(\"img/go.png\"), QIcon.Normal, QIcon.Off)\n self.FilterButton.setIcon(icon1)\n self.FilterButton.setIconSize(QSize(20, 20))\n self.FilterButton.setStyleSheet(\"background:white\")\n self.FilterButton.clicked.connect(self.on_start_action_clicked)\n self.horizontalLayout.addWidget(self.FilterButton)\n \"\"\"\n 网卡选择框\n \"\"\"\n self.choose_nicbox = QComboBox(self.centralWidget)\n self.choose_nicbox.setFont(font)\n self.choose_nicbox.setStyleSheet(\"background:white; color:black;\")\n self.horizontalLayout.addWidget(self.choose_nicbox)\n\n self.horizontalLayout.setStretch(0, 8)\n self.horizontalLayout.setStretch(1, 1)\n self.horizontalLayout.setStretch(2, 4)\n self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)\n \"\"\"初始网卡复选框\"\"\"\n row_num = len(keys)\n self.choose_nicbox.addItem(\"All\")\n for i in range(row_num):\n self.choose_nicbox.addItem(keys[i])\n\n self.setCentralWidget(self.centralWidget)\n \"\"\"\n 顶部菜单栏\n \"\"\"\n self.menuBar = QMenuBar(self)\n self.menuBar.setGeometry(QRect(0, 0, 953, 23))\n self.menuBar.setAccessibleName(\"\")\n self.menuBar.setDefaultUp(True)\n\n self.menu_F = QMenu(self.menuBar)\n self.menu_F.setTitle(\"文件(F)\")\n\n self.edit_menu = QMenu(self.menuBar)\n self.edit_menu.setTitle(\"编辑(E)\")\n\n self.capture_menu = QMenu(self.menuBar)\n self.capture_menu.setTitle(\"捕获(C)\")\n\n self.menu_H = QMenu(self.menuBar)\n self.menu_H.setTitle(\"帮助(H)\")\n\n self.menu_Analysis = QMenu(self.menuBar)\n self.menu_Analysis.setTitle(\"分析(A)\")\n\n self.menu_Statistic = QMenu(self.menuBar)\n self.menu_Statistic.setTitle(\"统计(S)\")\n self.setMenuBar(self.menuBar)\n\n #顶部工具栏\n self.mainToolBar = QToolBar(self)\n self.addToolBar(Qt.TopToolBarArea, self.mainToolBar)\n self.statusBar = QStatusBar(self)\n self.mainToolBar.setStyleSheet(\"background: #EDEDED;\")\n self.mainToolBar.setMaximumHeight(25)\n self.setStatusBar(self.statusBar)\n\n #字体设置键\n font_set = QAction(self)\n 
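# the chosen font is saved to data.json and restored on the next launch\n        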
font_set.setText(\"主窗口字体\")\n font_set.triggered.connect(self.on_font_set_clicked)\n\n #背景图片设置\n change_border = QAction(self)\n change_border.setText(\"背景图片\")\n change_border.triggered.connect(self.on_change_border_clicked)\n\n #开始键\n self.start_action = QAction(self)\n icon2 = QIcon()\n icon2.addPixmap(QPixmap(\"img/start.png\"), QIcon.Normal, QIcon.Off)\n self.start_action.setIcon(icon2)\n self.start_action.setText(\"开始\")\n self.start_action.setShortcut('F1')\n self.start_action.triggered.connect(self.on_start_action_clicked)\n\n #停止键\n self.stop_action = QAction(self)\n icon3 = QIcon()\n icon3.addPixmap(QPixmap(\"img/stop.png\"), QIcon.Normal, QIcon.Off)\n self.stop_action.setIcon(icon3)\n self.stop_action.setText(\"停止\")\n self.stop_action.setShortcut('F3')\n self.stop_action.setDisabled(True) #开始时该按钮不可点击\n self.stop_action.triggered.connect(self.on_stop_action_clicked)\n\n #暂停键\n self.pause_action = QAction(self)\n p_icon = QIcon()\n p_icon.addPixmap(QPixmap(\"img/pause.png\"), QIcon.Normal, QIcon.Off)\n self.pause_action.setIcon(p_icon)\n self.pause_action.setText(\"暂停\")\n self.pause_action.setShortcut('F2')\n self.pause_action.setDisabled(True) # 开始时该按钮不可点击\n self.pause_action.triggered.connect(self.on_pause_action_clicked)\n\n #重新开始键\n self.actionRestart = QAction(self)\n icon4 = QIcon()\n icon4.addPixmap(QPixmap(\"img/restart.png\"), QIcon.Normal, QIcon.Off)\n self.actionRestart.setIcon(icon4)\n self.actionRestart.setText(\"重新开始\")\n self.actionRestart.setShortcut('F4')\n self.actionRestart.setDisabled(True) # 开始时该按钮不可点击\n self.actionRestart.triggered.connect(self.on_actionRestart_clicked)\n\n #更新数据键\n self.action_update = QAction(self)\n icon5 = QIcon()\n icon5.addPixmap(QPixmap(\"img/update.png\"), QIcon.Normal, QIcon.Off)\n self.action_update.setIcon(icon5)\n self.action_update.setText(\"继续更新\")\n self.action_update.setShortcut('F5')\n self.action_update.setDisabled(True)\n self.action_update.triggered.connect(\n lambda: self.timer.start(flush_time) and self.action_update.setDisabled(True)\n )\n\n #帮助文档\n action_readme = QAction(self)\n action_readme.setText(\"使用文档\")\n action_about = QAction(self)\n action_about.setText(\"关于\")\n action_about.triggered.connect(self.on_action_about_clicked)\n\n #打开文件键\n action_openfile = QAction(self)\n action_openfile.setText(\"打开\")\n action_openfile.setShortcut(\"ctrl+O\")\n action_openfile.triggered.connect(self.on_action_openfile_clicked)\n\n #保存文件键\n action_savefile = QAction(self)\n action_savefile.setText(\"保存\")\n action_savefile.setShortcut(\"ctrl+S\")\n action_savefile.triggered.connect(self.on_action_savefile_clicked)\n\n #退出键\n self.action_exit = QAction(self)\n self.action_exit.setCheckable(False)\n self.action_exit.setText(\"退出\")\n self.action_exit.triggered.connect(self.on_action_exit_clicked)\n self.action_exit.setShortcut('ctrl+Q')\n self.action_exit.setStatusTip('退出应用程序')\n\n #构造包\n self.forged_action = QAction(self)\n self.forged_action.setText(\"伪造包\")\n self.forged_action.setShortcut('F7')\n self.forged_action.triggered.connect(self.forged_action_clicked)\n\n #流量监测\n self.action_track = QAction(self)\n self.action_track.setText(\"流量监测\")\n self.action_track.setShortcut('F6')\n self.action_track.triggered.connect(self.on_action_track_clicked)\n\n #IP地址类型统计图\n self.IP_statistics = QAction(self)\n self.IP_statistics.setText(\"IP地址类型统计\")\n self.IP_statistics.triggered.connect(self.on_IP_statistics_clicked)\n\n #报文类型统计图\n self.message_statistics = QAction(self)\n self.message_statistics.setText(\"报文类型统计\")\n 
self.message_statistics.triggered.connect(\n self.on_message_statistics_clicked)\n \"\"\"\n 添加工具栏:开始,暂停,停止,重新开始\n \"\"\"\n self.mainToolBar.addAction(self.start_action)\n self.mainToolBar.addAction(self.pause_action)\n self.mainToolBar.addAction(self.stop_action)\n self.mainToolBar.addAction(self.actionRestart)\n self.mainToolBar.addAction(self.action_update)\n\n self.menu_F.addAction(action_openfile)\n self.menu_F.addAction(action_savefile)\n self.menu_F.addAction(self.action_exit)\n self.menu_F.showFullScreen()\n\n self.edit_menu.addAction(font_set)\n self.edit_menu.addAction(change_border)\n\n #捕获菜单栏添加子菜单\n self.capture_menu.addAction(self.start_action)\n self.capture_menu.addAction(self.pause_action)\n self.capture_menu.addAction(self.stop_action)\n self.capture_menu.addAction(self.actionRestart)\n\n self.menu_H.addAction(action_readme)\n self.menu_H.addAction(action_about)\n\n self.menu_Analysis.addAction(self.forged_action)\n self.menu_Analysis.addAction(self.action_track)\n\n self.menu_Statistic.addAction(self.IP_statistics)\n self.menu_Statistic.addAction(self.message_statistics)\n\n self.menuBar.addAction(self.menu_F.menuAction())\n self.menuBar.addAction(self.edit_menu.menuAction())\n self.menuBar.addAction(self.capture_menu.menuAction())\n self.menuBar.addAction(self.menu_Analysis.menuAction())\n self.menuBar.addAction(self.menu_Statistic.menuAction())\n self.menuBar.addAction(self.menu_H.menuAction())\n\n # self.statusBar.showMessage('实时更新的信息', 0) # 状态栏本身显示的信息 第二个参数是信息停留的时间,单位是毫秒,默认是0(0表示在下一个操作来临前一直显示)\n \"\"\"底部状态栏\n 利用self.comNum.setText()实时更新状态栏信息\n \"\"\"\n self.comNum = QLabel('下载速度:')\n self.baudNum = QLabel('上传速度:')\n self.getSpeed = QLabel('收包速度:')\n self.sendSpeed = QLabel('发包速度:')\n self.netNic = QLabel('Welcome to WireWhale! ^ _ ^')\n self.statusBar.setStyleSheet(\"background: #EDEDED;\")\n \"\"\"各个单元空间占比\"\"\"\n self.statusBar.addPermanentWidget(self.netNic, stretch=2)\n self.statusBar.addPermanentWidget(self.getSpeed, stretch=1)\n self.statusBar.addPermanentWidget(self.sendSpeed, stretch=1)\n self.statusBar.addPermanentWidget(self.comNum, stretch=1)\n self.statusBar.addPermanentWidget(self.baudNum, stretch=1)\n\n QMetaObject.connectSlotsByName(self)\n self.core = Core(self)\n # 设置定时器将抓包列表置底\n self.timer = QTimer(self)\n self.timer.timeout.connect(self.info_tree.scrollToBottom)\n self.show()\n\n \"\"\"\n 重写窗口关闭事件\n \"\"\"\n\n def closeEvent(self, QCloseEvent):\n def close_to_do():\n self.core.clean_out()\n if self.Monitor and self.Monitor.is_alive():\n self.Monitor.terminate()\n if self.Forged and self.Forged.is_alive():\n self.Forged.terminate()\n exit()\n\n if self.core.start_flag or self.core.pause_flag:\n # 没有停止抓包\n reply = QMessageBox.question(\n self, 'Message', \"您是否要停止捕获,并保存已捕获的分组?\\n警告:若不保存,您捕获的分组将会丢失\",\n QMessageBox.Save | QMessageBox.Close | QMessageBox.Cancel,\n QMessageBox.Cancel)\n if reply == QMessageBox.Cancel:\n QCloseEvent.ignore()\n if reply == QMessageBox.Close:\n self.core.stop_capture()\n close_to_do()\n elif reply == QMessageBox.Save:\n self.core.stop_capture()\n self.on_action_savefile_clicked()\n close_to_do()\n elif self.core.stop_flag and not self.core.save_flag:\n \"\"\"\n 已停止,但没有保存文件\n \"\"\"\n reply = QMessageBox.question(\n self, 'Message', \"您是否保存已捕获的分组?\\n警告:若不保存,您捕获的分组将会丢失\",\n QMessageBox.Save | QMessageBox.Close | QMessageBox.Cancel,\n QMessageBox.Cancel)\n if reply == QMessageBox.Cancel:\n QCloseEvent.ignore()\n elif reply == QMessageBox.Save:\n self.on_action_savefile_clicked()\n close_to_do()\n else:\n close_to_do()\n elif 
self.core.save_flag or not self.core.start_flag:\n \"\"\"\n 未工作状态\n \"\"\"\n reply = QMessageBox.question(self, 'Message', \"您是否要退出本程序?\",\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No)\n if reply == QMessageBox.Yes:\n close_to_do()\n else:\n QCloseEvent.ignore()\n\n \"\"\"绘制背景\"\"\"\n\n def paintEvent(self, a0: QPaintEvent):\n painter = QPainter(self)\n pixmap = QPixmap(\"img/Whale1.jpg\")\n painter.drawPixmap(self.rect(), pixmap)\n\n \"\"\"\n 数据包视图 数据记录点击事件\n 点击列表中一条记录时,在下面的frame框中显示帧的详细信息\n \"\"\"\n\n def on_tableview_clicked(self):\n selected_row = self.info_tree.currentItem().text(0) #当前选择的编号\n #表格停止追踪更新\n if selected_row and selected_row.isdigit():\n self.timer.stop()\n self.show_infoTree((int)(selected_row))\n if not self.core.pause_flag and not self.core.stop_flag:\n self.action_update.setDisabled(False)\n\n \"\"\"\n 展开帧的详细信息\n \"\"\"\n\n def show_infoTree(self, selected_row):\n \"\"\"\n 清空Frame Information内容\n \"\"\"\n self.treeWidget.clear()\n \"\"\"\n 添加树节点\n Item1: 第一层树节点\n Item1_1: 第二层树节点,Item1的子节点\n QTreeWidgetItem(parentNode, text) parentNode:父节点 text:当前节点内容\n \"\"\"\n parentList, childList, hex_dump = self.core.on_click_item(selected_row)\n p_num = len(parentList)\n for i in range(p_num):\n item1 = QTreeWidgetItem(self.treeWidget)\n item1.setText(0, parentList[i])\n c_num = len(childList[i])\n for j in range(c_num):\n item1_1 = QTreeWidgetItem(item1)\n item1_1.setText(0, childList[i][j])\n self.set_hex_text(hex_dump)\n\n \"\"\"\n 获取当前选择的网卡\n \"\"\"\n\n def get_choose_nic(self):\n card = self.choose_nicbox.currentText()\n self.netNic.setText('当前网卡:' + card)\n if (card == 'All'):\n a = None\n elif platform == 'Windows':\n a = netcards[card]\n elif platform == 'Linux':\n a = card\n else:\n a = None\n return a\n\n \"\"\"\n 设置hex区文本\n \"\"\"\n\n def set_hex_text(self, text):\n self.hexBrowser.setText(text)\n\n \"\"\"\n 设置字体点击事件\n \"\"\"\n\n def on_font_set_clicked(self):\n font, ok = QFontDialog.getFont()\n if ok:\n with open('data.json', 'r') as file_obj:\n '''读取json文件'''\n old_font = json.load(file_obj) # 返回列表数据,也支持字典\n old_font[\"font\"] = font.family()\n old_font[\"size\"] = font.pointSize()\n with open('data.json', 'w') as file:\n json.dump(old_font, file)\n self.info_tree.setFont(font)\n self.treeWidget.setFont(font)\n self.hexBrowser.setFont(font)\n\n \"\"\"\n 设置背景图片\n \"\"\"\n\n def on_change_border_clicked(self):\n imgName, imgType = QFileDialog.getOpenFileName(\n self, \"打开图片\", \"C:/\", \"*.jpg;;*.png;;All Files(*)\")\n with open('data.json', 'r') as file_obj:\n '''读取json文件'''\n old_image = json.load(file_obj) # 返回列表数据,也支持字典\n old_image[\"imageUrl\"] = imgName\n with open('data.json', 'w') as file:\n json.dump(old_image, file)\n window_pale = QPalette()\n window_pale.setBrush(self.backgroundRole(), QBrush(QPixmap(imgName)))\n self.setPalette(window_pale)\n\n \"\"\"\n 开始键点击事件\n \"\"\"\n\n def on_start_action_clicked(self):\n if self.core.stop_flag:\n # 重新开始清空面板内容\n self.info_tree.clear()\n self.treeWidget.clear()\n self.set_hex_text(\"\")\n self.core.start_capture(self.get_choose_nic(), self.Filter.text())\n \"\"\"\n 点击开始后,过滤器不可编辑,开始按钮、网卡选择框全部设为不可选\n 激活暂停、停止键、重新开始键\n \"\"\"\n self.start_action.setDisabled(True)\n self.Filter.setEnabled(False)\n self.FilterButton.setEnabled(False)\n self.choose_nicbox.setEnabled(False)\n self.actionRestart.setDisabled(False)\n self.pause_action.setEnabled(True)\n self.stop_action.setEnabled(True)\n self.timer.start(flush_time)\n\n \"\"\"\n 暂停事件点击事件\n \"\"\"\n\n def on_pause_action_clicked(self):\n self.core.pause_capture()\n 
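# pausing only stops the refresh timer; already-captured rows stay in the list\n        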
\"\"\"\n 激活开始、停止、重新开始键、过滤器、网卡选择框\n \"\"\"\n self.start_action.setEnabled(True)\n self.stop_action.setDisabled(False)\n self.actionRestart.setDisabled(False)\n self.Filter.setDisabled(True)\n self.FilterButton.setDisabled(True)\n self.choose_nicbox.setDisabled(False)\n self.pause_action.setDisabled(True)\n self.action_update.setDisabled(True)\n self.timer.stop()\n\n \"\"\"\n 菜单栏停止键点击事件\n \"\"\"\n\n def on_stop_action_clicked(self):\n self.core.stop_capture()\n \"\"\"\n 激活开始键、重新开始键、过滤器、网卡选择框\n \"\"\"\n self.stop_action.setDisabled(True)\n self.pause_action.setDisabled(True)\n self.start_action.setEnabled(True)\n self.Filter.setDisabled(False)\n self.FilterButton.setDisabled(False)\n self.choose_nicbox.setDisabled(False)\n self.action_update.setDisabled(True)\n self.timer.stop()\n\n \"\"\"\n 重新开始键响应事件\n \"\"\"\n\n def on_actionRestart_clicked(self):\n # 重新开始清空面板内容\n self.timer.stop()\n self.core.restart_capture(self.get_choose_nic(), self.Filter.text())\n self.info_tree.clear()\n self.treeWidget.clear()\n self.set_hex_text(\"\")\n \"\"\"\n 点击开始后,过滤器不可编辑,开始按钮,网卡选择框全部设为不可选\n 激活暂停、停止键、重新开始键\n \"\"\"\n self.actionRestart.setDisabled(False)\n self.start_action.setDisabled(True)\n self.Filter.setEnabled(False)\n self.FilterButton.setEnabled(False)\n self.choose_nicbox.setEnabled(False)\n self.pause_action.setEnabled(True)\n self.stop_action.setEnabled(True)\n self.timer.start(flush_time)\n\n \"\"\"\n IP地址类型统计图绘制\n \"\"\"\n\n def on_IP_statistics_clicked(self):\n IP = self.core.get_network_count()\n IPv4_count = IP[\"ipv4\"]\n IPv6_count = IP[\"ipv6\"]\n IP_count = IPv4_count + IPv6_count\n if IP_count == 0:\n reply = QMessageBox.information(self, \"提示\", \"你还没有抓包!\",\n QMessageBox.Cancel)\n\n else:\n IPv4_fre = IPv4_count / IP_count\n IPv6_fre = IPv6_count / IP_count\n data = {\n 'IPv4': (IPv4_fre, '#7199cf'),\n 'IPv6': (IPv6_fre, '#4fc4aa'),\n }\n\n fig = plt.figure(figsize=(6, 4))\n\n # 创建绘图区域\n ax1 = fig.add_subplot(111)\n ax1.set_title('IPv4 & IPv6 Statistical Chart')\n\n # 生成x轴的每个元素的位置,列表是[1,2,3,4]\n xticks = np.arange(1, 3)\n\n # 自定义柱状图的每个柱的宽度\n bar_width = 0.6\n\n IP_type = data.keys()\n values = [x[0] for x in data.values()]\n colors = [x[1] for x in data.values()]\n\n # 画柱状图,设置柱的边缘为透明\n bars = ax1.bar(xticks, values, width=bar_width, edgecolor='none')\n\n # 设置y轴的标签\n ax1.set_ylabel('Proportion')\n\n ax1.set_xticks(xticks)\n ax1.set_xticklabels(IP_type)\n\n # 设置x,y轴的范围\n ax1.set_xlim([0, 3.5])\n ax1.set_ylim([0, 1])\n\n # 给每一个bar分配颜色\n for bar, color in zip(bars, colors):\n bar.set_color(color)\n plt.show()\n\n \"\"\"\n 数据包类型数量统计\n \"\"\"\n\n def on_message_statistics_clicked(self):\n trans = self.core.get_transport_count()\n\n TCP_count = trans[\"tcp\"]\n UDP_count = trans[\"udp\"]\n ARP_count = trans[\"arp\"]\n ICMP_count = trans[\"icmp\"]\n\n if TCP_count + UDP_count + ARP_count + ICMP_count == 0:\n reply = QMessageBox.information(self, \"提示\", \"你还没有抓包!\",\n QMessageBox.Cancel)\n\n else:\n\n labels = 'TCP', 'ICMP', 'UDP', 'ARP'\n fracs = [TCP_count, ICMP_count, UDP_count, ARP_count]\n explode = [0.1, 0.1, 0.1, 0.1] # 0.1 凸出这部分,\n plt.axes(\n aspect=1\n ) # set this , Figure is round, otherwise it is an ellipse\n # autopct ,show percet\n plt.pie(\n x=fracs,\n labels=labels,\n explode=explode,\n autopct='%3.1f %%',\n shadow=True,\n labeldistance=1.1,\n startangle=90,\n pctdistance=0.6)\n plt.show()\n\n \"\"\"\n 打开文件事件\n \"\"\"\n\n def on_action_openfile_clicked(self):\n if self.core.start_flag or self.core.pause_flag:\n QMessageBox.warning(self, \"警告\", \"请停止当前抓包!\")\n return\n 
self.core.open_pcap_file()\n\n \"\"\"\n 保存文件点击事件\n \"\"\"\n\n def on_action_savefile_clicked(self):\n if self.core.start_flag or self.core.pause_flag:\n QMessageBox.warning(self, \"警告\", \"请停止当前抓包!\")\n return\n self.core.save_captured_to_pcap()\n\n \"\"\"\n 菜单栏追踪流键点击事件\n \"\"\"\n\n def on_action_track_clicked(self):\n if not self.Monitor or not self.Monitor.is_alive():\n self.Monitor = Process(target=start_monitor)\n self.Monitor.start()\n\n ''\n\n def forged_action_clicked(self):\n if not self.Forged or not self.Forged.is_alive():\n self.Forged = Process(target=startForged)\n self.Forged.start()\n\n about = \"软件著作者:张桓皓 张兴\\n\\n\" + \"软件主要功能如下:\\n\" + \"1. 侦听指定网卡或所有网卡,抓取流经网卡的数据包;\\n\" + \"2. 解析捕获的数据包每层的每个字段,查看数据包的详细内容;\\n\" + \"3. 可通过不同的需求设置了BPF过滤器,获取指定地址、端口或协议等相关条件的报文;\\n\" + \"4. 针对应用进行流量监测,监测结果实时在流量图显示,并可设置流量预警线,当流量超过预警线时自动报警;\\n\" + \"5. 提供了以饼状图的形式统计ARP、TCP、UDP、ICMP报文,以柱状图的形式统计IPv4、IPv6报文;\\n\" + \"6. 可将抓取到的数据包另存为pcap文件,并能通过打开一个pcap文件对其中的数据包进行解析;\\n\" + \"7. 可逐层逐字段构造数据包,实现自定义数据包发送。\\n\\n\" + \"解释权归著作者所有\"\n\n def on_action_about_clicked(self):\n QMessageBox.information(self, \"关于\", self.about)\n\n \"\"\"\n 退出点击事件\n \"\"\"\n\n def on_action_exit_clicked(self, event):\n self.closeEvent(event)\n\n \"\"\"\n 进度加载框\n num: 加载数据数量\n \"\"\"\n\n def showDialog(self, num):\n progress = QProgressDialog(self)\n progress.setWindowTitle(\"请稍等\")\n progress.setLabelText(\"正在加载数据...\")\n progress.setCancelButtonText(\"取消\")\n progress.setMinimumDuration(1) #进度条加载时间\n progress.setWindowModality(Qt.WindowModal)\n progress.setRange(0, num)\n for i in range(num):\n progress.setValue(i)\n if progress.wasCanceled():\n QMessageBox.warning(self, \"提示\", \"操作失败\")\n break\n progress.setValue(num)\n QMessageBox.information(self, \"提示\", \"操作成功\")\n\n \"\"\"键盘点击事件\"\"\"\n\n def keyReleaseEvent(self, event):\n if event.key() == Qt.Key_Up or event.key() == Qt.Key_Down:\n self.timer.stop()\n selected_row = self.info_tree.currentItem().text(0)\n if selected_row and selected_row.isdigit():\n self.show_infoTree(int(selected_row))\n self.action_update.setDisabled(False)\n if event.key() == Qt.Key_F5:\n self.timer.start(flush_time)\n self.action_update.setDisabled(True)\n\n\ndef start():\n app = QApplication([])\n ui = Ui_MainWindow()\n ui.setupUi()\n app.exec()\n","repo_name":"zhanghuanhao/WireWhale","sub_path":"main_ui.py","file_name":"main_ui.py","file_ext":"py","file_size_in_byte":30378,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"77"} +{"seq_id":"32051446237","text":"import requests\nimport re\nimport locale\nfrom bs4 import BeautifulSoup as soap\n\nlocale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\nresults = {}\n\n\ndef main():\n url = 'https://www.borsaitaliana.it/borsa/azioni/' \\\n 'obbligazioni-convertibili/dati-completi.html?isin=IT0005256059&lang=en'\n\n isin = re.search(r'isin=(\\w+)', url, re.I).group(1)\n resource = requests.get(url)\n if not resource.text:\n print('Page is not found: ' + url)\n return ''\n\n content = resource.text\n # f = open('D:/Documents/test.html', 'w')\n # f.write(resource.text)\n # f.close()\n # file = open('D:/Documents/test.html', 'r')\n # content = file.read()\n # file.close()\n\n doc = soap(content, 'html.parser')\n rows = doc.find_all(string=re.compile('Stream Prices'))[0].find_parent('table').find_all('tr')\n if not rows:\n print('Table is empty')\n return\n\n columns = {}\n columns_counter = 0\n patterns = {\n r'Stream\\s+Prices': 'stream',\n r'No': 'number',\n r'Bid\\s+Quantity': 'bid_quantity',\n r'Ask\\s+Quantity': 
'ask_quantity',\n r'Bid\\s+Price': 'bid',\n r'Ask\\s+Price': 'ask',\n }\n\n keys = results.keys()\n for row in rows:\n if columns_counter < 5:\n columns = get_columns(row, patterns.items())\n columns_counter = columns.__len__()\n continue\n\n for column, index in columns.items():\n value = row.find_all('td')[index].text.strip()\n if not value:\n continue\n\n value = locale.atof(value)\n if value:\n if isin in keys:\n results[isin][column] = value\n else:\n results[isin] = {column: value}\n\n print(results)\n\n\ndef get_columns(row, patterns, tag='th'):\n columns = {}\n index = 0\n for cell in row.find_all(tag):\n for pattern, column in patterns:\n if re.findall(pattern, cell.text, re.I):\n columns[column] = index\n index += 1\n\n return columns\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SolistTV/myPythonTests","sub_path":"PythonApplication1/PythonApplication1.py","file_name":"PythonApplication1.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41196058150","text":"from datetime import datetime\nfrom typing import Optional\n\nfrom server.model.base import Model\n\n\ndef iso8601(timestamp: datetime):\n return timestamp.strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n\nclass Question(Model):\n db_name = 'questions'\n\n schema = {\n 'user_id': str,\n 'article_id': str,\n 'question': str,\n 'answer': Optional[str],\n 'created_at': datetime,\n 'answered_at': Optional[datetime],\n }\n\n field_formats = {\n 'created_at': iso8601,\n 'answered_at': lambda x: iso8601(x) if x else str(x),\n }\n","repo_name":"Taller-2/app-server","sub_path":"server/model/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"40667572726","text":"import os\nfrom flask import Blueprint, request, flash, render_template, send_file, jsonify\nfrom models import *\nfrom forms import SpammerForm\nimport utils\n\nspamsub = Blueprint(\n 'spamsub',\n __name__,\n template_folder='templates'\n )\n\n@spamsub.route('/', methods=['GET', 'POST'])\ndef index():\n \"\"\" Index page \"\"\"\n count = Address.query.count()\n form = SpammerForm()\n # try to validate, and check for AJAX submission\n if form.validate_on_submit():\n if not utils.check_if_exists(form.address.data):\n flash(\n u\"We've added %s to the database.\" % form.address.data,\n \"text-success\")\n else:\n flash(\n u\"We already know about %s, though.\" % form.address.data,\n \"text-success\")\n if request.is_xhr:\n # OK to send back a fragment\n return render_template(\n 'form.jinja',\n form=form,\n )\n # GET or no JS, so render a full page\n return render_template(\n 'index.jinja',\n form=form,\n count=count,\n recaptcha_public_key=app.config['RECAPTCHA_PUBLIC_KEY'])\n\n@spamsub.route('download', methods=['GET'])\ndef download():\n \"\"\" Download the latest version of spammers.txt \"\"\"\n utils.update_db()\n return send_file(\n os.path.join(utils.basename, \"git_dir/spammers.txt\"),\n as_attachment=True,\n attachment_filename=\"spammers.txt\")\n\n@spamsub.route('updates', methods=['GET'])\ndef updates():\n \"\"\" Check for updates in GitHub repo if more than an hour's passed \"\"\"\n vals = {\n 'last_updated': utils.sync_check(),\n 'count': Address.query.count(),\n }\n return 
jsonify(vals)\n\n\n\n\n\n","repo_name":"drcongo/spammy-recruiters","sub_path":"webapp/apps/spamsub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":279,"dataset":"github-code","pt":"77"} +{"seq_id":"37061390958","text":"#Part 1\ns = 0\nwith open(\"input.txt\") as f:\n group = ''\n for line in f.readlines():\n if line.strip() != '': group+=line.strip()\n else:\n s+= len(set(group))\n group=''\n if group!='': s+= len(set(group))\nprint(s)\n\n#Part 2\ns = 0\nwith open(\"input.txt\") as f:\n newgroup = True\n for line in f.readlines():\n if line.strip() != '' and newgroup: \n group = list(line.strip())\n newgroup = False\n elif line.strip() != '':\n group = [c for c in group if c in list(line.strip())]\n else:\n s+= len(group)\n group = []\n newgroup = True\n if group != []: s+= len(group)\nprint(s)","repo_name":"Devilmoon/AoC2020","sub_path":"Day 6/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40252949528","text":"'''\nhttps://leetcode-cn.com/problems/24-game\n\n你有 4 张写有 1 到 9 数字的牌。你需要判断是否能通过 *,/,+,-,(,) 的运算得到 24。\n示例 1:\n输入: [4, 1, 8, 7]\n输出: True\n解释: (8-4) * (7-1) = 24\n\n示例 2:\n输入: [1, 2, 1, 2]\n输出: False\n\n注意:\n\n除法运算符 / 表示实数除法,而不是整数除法。例如 4 / (1 - 2/3) = 12 。\n每个运算符对两个数进行运算。特别是我们不能用 - 作为一元运算符。例如,[1, 1, 1, 1] 作为输入时,表达式 -1 - 1 - 1 - 1 是不允许的。\n你不能将数字连接在一起。例如,输入为 [1, 2, 1, 2] 时,不能写成 12 + 12 。\n'''\n\nclass Solution:\n def judgePoint24(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n \n\n\nif __name__ == '__main__':\n s = Solution()\n # ret = s.\n print(s)\n","repo_name":"chanfengsr/AllPrivateProject","sub_path":"Python/LeetCodeTraining/题库/0679 24点游戏(24 Game).py","file_name":"0679 24点游戏(24 Game).py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"} +{"seq_id":"70784452089","text":"from django.shortcuts import render\nfrom .models import Pet, Animal, Breed\nfrom django.views.generic import TemplateView\nfrom django.db.models import F, Max\nfrom random import randint\nfrom django.views.generic import CreateView, UpdateView, DeleteView, ListView\nfrom .forms import PetForm, BreedForm\nfrom django.urls import reverse_lazy\nfrom django.http import JsonResponse\n\n# Create your views here.\n\ndef getbreedlist(request, breed_id):\n if breed_id != 0:\n res = list(Breed.objects.all().filter(animal_type_ref=breed_id).values())\n else:\n res = list(Breed.objects.all().values())\n return JsonResponse(res, safe=False)\n\nclass Landing(TemplateView):\n template_name = 'index.html'\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['amount'] = Pet.objects.count()\n if context['amount'] > 0:\n max_id = Pet.objects.all().aggregate(max_id=Max(\"id\"))['max_id']\n item=None\n while True:\n pk = randint(1, max_id)\n item = Pet.objects.filter(pk=pk).first()\n if item:\n break\n context['random_card'] = item\n return context\n\nclass AddPet(CreateView):\n model = Pet\n form_class = PetForm\n success_url = reverse_lazy('pets:pet_list')\n template_name = 'pet_edit.html'\n\nclass EditPet(UpdateView):\n model = Pet\n form_class = PetForm\n success_url = reverse_lazy('pets:pet_list')\n template_name = 'pet_edit.html'\n\nclass PetList(ListView):\n template_name = 'pet_list.html'\n context_object_name = 'pets'\n model = Pet\n\nclass 
AddBreed(CreateView):\n model = Breed\n form_class = BreedForm\n success_url = reverse_lazy('pets:add_pet')\n template_name = 'breed_edit.html'\n\nclass PetDetail(DeleteView):\n model = Pet\n template_name = 'pet.html'\n context_object_name = 'pet'\n","repo_name":"Konunacmep/skillfactory-module-d-project-shaidulov","sub_path":"pets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"17655488905","text":"'''\n rmtoo\n Free and Open Source Requirements Management Tool\n\n Blackbox rmtoo test\n\n (c) 2010-2012,2017 by flonatel GmbH & Co. KG\n\n For licensing details see COPYING\n'''\nfrom rmtoo.tests.lib.BBHelper import BBHelper\n\ncmd_line_parsms = '''json:{\"actions\": {\"create_makefile_dependencies\":\n\"${ENV:rmtoo_test_dir}/makefile_deps\"}}'''\n\n\nclass RMTTestBB006(BBHelper):\n\n out_test_dir = \"tests/RMTTest-Blackbox/RMTTest-BB006\"\n in_test_dir = \"tests/blackbox-test/bb006-test\"\n\n def rmttest_pos_001(self):\n \"BB Basic with one requirement - check makefile dependencies\"\n self.run_test(unify_output_dirs=[\"makefile_deps\"],\n relaxed=True,\n cmd_line_params=[\n \"-j\", \"file://\" + self.out_test_dir\n + \"/input/Config.json\",\n \"-j\", cmd_line_parsms])\n","repo_name":"florath/rmtoo","sub_path":"rmtoo/tests/RMTTest-Blackbox/RMTTest-BB006/RMTTest-BB006.py","file_name":"RMTTest-BB006.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"77"} +{"seq_id":"22864041710","text":"import math\r\n\r\n\r\ndef main():\r\n print(\"Enter the value of modulo (q) :\")\r\n q = int(input())\r\n print(\"Enter the primituve of \",q,\" : \")\r\n p = int(input())\r\n\r\n print(\"Enter the private key of Alice (person1) : \")\r\n a = int(input())\r\n\r\n print(\"Enter the private key of Bob (person 2) : \")\r\n b = int(input())\r\n\r\n # Calculating their public key\r\n\r\n Y_A = (p ** a) % q # public key of alice\r\n Y_B = (p ** b) % q # public key of bob\r\n\r\n\r\n # Calculating secret key of alice and bob\r\n\r\n K_A = (Y_B ** a) % q\r\n K_B = (Y_A ** b) % q\r\n\r\n print(\"The secret key of Alice :\",K_A)\r\n print(\"The secret key of Bob :\",K_B)\r\n\r\n if K_A == K_B:\r\n print(\"Alice and Bob can communicate\")\r\n print(\"They share secret number : \",K_B)\r\n\r\n else:\r\n print(\"They cannot communicate\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n","repo_name":"TanayDajgude/LP-2","sub_path":"DiffieHelman.py","file_name":"DiffieHelman.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28755959397","text":"#!/usr/bin/env python3\n\nimport kopf\nimport kubernetes.config as k8s_config\nimport kubernetes.client as k8s_client\nimport requests\nimport pykube\nimport yaml\n\n\nodoo_crd = k8s_client.V1CustomResourceDefinition(\n api_version=\"apiextensions.k8s.io/v1\",\n kind=\"CustomResourceDefinition\",\n metadata=k8s_client.V1ObjectMeta(name=\"odoos.operators.nurlanf.github.io\"),\n spec=k8s_client.V1CustomResourceDefinitionSpec(\n group=\"operators.nurlanf.github.io\",\n versions=[k8s_client.V1CustomResourceDefinitionVersion(\n name=\"v1\",\n served=True,\n storage=True,\n schema=k8s_client.V1CustomResourceValidation(\n open_apiv3_schema=k8s_client.V1JSONSchemaProps(\n type=\"object\",\n properties={\n \"spec\": k8s_client.V1JSONSchemaProps(\n 
type=\"object\",\n properties={\n \"version\": k8s_client.V1JSONSchemaProps(\n type=\"string\",\n enum=[\"13.0\",\"14.0\",\"15.0\"]\n ),\n \"auto_backup\": k8s_client.V1JSONSchemaProps(\n type=\"boolean\"\n )\n }\n ),\n \"status\": k8s_client.V1JSONSchemaProps(\n type=\"object\",\n x_kubernetes_preserve_unknown_fields=True\n )\n }\n )\n )\n )],\n scope=\"Namespaced\",\n names=k8s_client.V1CustomResourceDefinitionNames(\n plural=\"odoos\",\n singular=\"odoo\",\n kind=\"Odoo\",\n short_names=[\"od\"]\n )\n )\n)\n\ntry:\n k8s_config.load_kube_config()\nexcept k8s_config.ConfigException:\n k8s_config.load_incluster_config()\n\napi_instance = k8s_client.ApiextensionsV1Api()\ntry:\n api_instance.create_custom_resource_definition(odoo_crd)\nexcept k8s_client.rest.ApiException as e:\n if e.status == 409:\n print(\"CRD already exists\")\n else:\n raise e\n\ndef get_odoo_configmap_name(api, auto_backup=False):\n addons_path = f\"/mnt/extra-addons\"\n\n if auto_backup:\n addons_path += \",/mnt/default\"\n\n doc = yaml.safe_load(f\"\"\"\n apiVersion: v1\n kind: ConfigMap\n data:\n odoo.conf: |\n [options]\n addons_path = {addons_path}\n \"\"\")\n\n kopf.adopt(doc)\n\n # Actually create an object by requesting the Kubernetes API.\n configmap = pykube.ConfigMap(api, doc)\n configmap.create()\n api.session.close()\n\n return configmap.metadata['name']\n\n\n@kopf.on.create('operators.nurlanf.github.io', 'v1', 'odoos')\ndef create_odoo(namespace, spec, body, **kwargs):\n\n api = pykube.HTTPClient(pykube.KubeConfig.from_env())\n\n configmap = get_odoo_configmap_name(api, auto_backup=spec['auto_backup'])\n # Render the pod yaml with some spec fields used in the template.\n doc = yaml.safe_load(f\"\"\"\n apiVersion: v1\n kind: Pod\n spec:\n containers:\n - name: postgres\n image: postgres:11\n env:\n - name: POSTGRES_USER\n value: odoo\n - name: POSTGRES_PASSWORD\n value: odoo\n - name: POSTGRES_DB\n value: postgres\n - name: PGDATA\n value: \"/var/lib/postgresql/data/postgres\"\n - name: odoo\n image: odoo:{spec['version']}\n env:\n - name: HOST\n value: localhost\n - name: USER\n value: odoo\n - name: PASSWORD\n value: odoo\n volumeMounts:\n - name: odoo-conf\n mountPath: /etc/odoo/odoo.conf\n subPath: odoo.conf\n volumes:\n - name: odoo-conf\n configMap:\n name: {configmap}\n\n \"\"\")\n\n # Make it our child: assign the namespace, name, labels, owner references, etc.\n kopf.adopt(doc)\n\n pod = pykube.Pod(api, doc)\n pod.create()\n api.session.close()\n\n # Update the parent's status.\n return {'children': [pod.metadata['uid']]}\n\n","repo_name":"nurlanf/odoo-operator","sub_path":"odoo-operator.py","file_name":"odoo-operator.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73365673209","text":"from collections import defaultdict\nfrom typing import List\n\n\nclass Solution:\n def findItinerary(self, tickets: List[List[str]]) -> List[str]:\n routers = defaultdict(list)\n path = [\"JFK\"]\n for a, b in tickets:\n routers[a].append(b)\n\n def backtrack(start) -> bool:\n if len(path) == len(tickets) + 1:\n return True\n routers[start].sort()\n for _ in routers[start]:\n arrival = routers[start].pop(0)\n path.append(arrival)\n if backtrack(arrival):\n return True\n routers[start].append(arrival)\n path.pop()\n\n backtrack(\"JFK\")\n return path\n\n\nif __name__ == '__main__':\n print(Solution().findItinerary(tickets=[[\"MUC\", \"LHR\"], [\"JFK\", \"MUC\"], [\"SFO\", \"SJC\"], [\"LHR\", 
\"SFO\"]]))\n","repo_name":"aotemiao/my-leetcode","sub_path":"leet-code-classic/reconstruct-itinerary/findItinerary.py","file_name":"findItinerary.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28257771466","text":"import os\nfrom matplotlib import pyplot as plt\nim_path1 = 'img/appearance'\nim_path2 = 'img/structure'\nim_names1 = ['red.png', 'rainbow.png', 'blue.png']\nim_names2 = ['bob.png', 'fade.png', 'straight.png']\n\n# lets make a grid of 2*3\nfig = plt.figure(figsize=(10, 5))\n# lets plot the first row\nfor i in range(3):\n ax = fig.add_subplot(2, 3, i+1)\n # lets add img names without suffix to the title\n\n ax.imshow(plt.imread(os.path.join(im_path1, im_names1[i])))\n ax.set_title(im_names1[i].split('.')[0])\n\n ax.axis('off')\n\n# lets plot the second row\nfor i in range(3):\n ax = fig.add_subplot(2, 3, i+4)\n ax.imshow(plt.imread(os.path.join(im_path2, im_names2[i])))\n ax.set_title(im_names2[i].split('.')[0])\n ax.axis('off')\n\n# lets save the figure\nfig.savefig('img/row_col.png')","repo_name":"abbasmammadov/Artificial-Barber","sub_path":"utils/visualizations.py","file_name":"visualizations.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"493799894","text":"import datetime\nfrom math import sqrt\n\n\ndef now():\n \"\"\"Get a current datetime object.\"\"\"\n return datetime.datetime.now()\n\n\ndef factorial(number):\n \"\"\"Calculate factorials\n\n Args:\n number (int): the number for which to calculate.\n\n Returns:\n The (int) factorial.\n \"\"\"\n product = 1\n for term in range(number, 1, -1):\n product *= term\n return product\n\n\ndef fibonacci_sequence(maximum=None):\n \"\"\"Generate (yield) successive terms in a Fibonacci sequence.\"\"\"\n a, b = 0, 1\n\n while True:\n yield b\n a, b = b, a + b\n if maximum and b > maximum:\n raise StopIteration\n\n\ndef primes(maximum_value, verbose=True):\n \"\"\"Generate a sequence of prime numbers, using Sieve of Eratosthenes.\n\n Args:\n maximum_value (int) or (float): the upper limit of primes to generate.\n verbose (bool): prints primes found to stdout if True.\n\n Returns:\n Yields a sequence of prime numbers, one per iteration.\n \"\"\"\n known_nonprimes = set()\n range_stop = int(maximum_value) + 1\n\n for cursor in range(2, range_stop):\n if cursor not in known_nonprimes:\n more_nonprimes = range(cursor ** 2, range_stop, cursor)\n known_nonprimes.update(more_nonprimes)\n\n if verbose:\n print('found prime: %12d' % cursor)\n\n yield cursor\n\n\ndef is_divisor(factor, number):\n \"\"\"Determine if a potential factor divides cleanly.\n\n Args\n factor (int): potential divisor\n number (int): the bigger number to test against.\n\n Returns:\n True or False.\n \"\"\"\n return not bool(number % factor)\n\n\ndef proper_divisors(number, verbose=False):\n \"\"\"Find proper divisors of a number.\n\n Args:\n number (int): the number to resolve.\n\n Returns:\n A list of integers.\n \"\"\"\n range_stop = int((number / 2) + 1)\n candidates = set(range(1, range_stop))\n found, max_divisor = [], None\n\n while len(candidates):\n candidate = candidates.pop()\n\n if is_divisor(candidate, number):\n found.append(candidate)\n\n # crash out early or set \"done\" threshold for greatest divisor\n if max_divisor and candidate >= max_divisor:\n break\n elif max_divisor is None and candidate > 1:\n max_divisor = int(number / candidate)\n 
range_stop = max_divisor\n\n else:\n candidates.difference_update(range(candidate ** 2, range_stop, candidate))\n\n if verbose:\n print('found %d divisors for %d' % (len(found), number))\n\n return found\n\n\ndef find_factors(number):\n \"\"\"Find the factors and their powers of a number.\n\n Args:\n number (int): the number to factor.\n\n Returns:\n A (dict) whose keys are the factors and values their powers.\n \"\"\"\n unfactored_portion, accumulated_factors = number, {}\n\n for prime in primes(sqrt(number), verbose=False):\n while is_divisor(prime, unfactored_portion):\n accumulated_factors[prime] = accumulated_factors.get(prime, 0) + 1\n unfactored_portion /= prime\n\n if unfactored_portion > 1:\n accumulated_factors[unfactored_portion] = 1\n\n return accumulated_factors\n","repo_name":"peterjpierce/project_euler","sub_path":"shared/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"9672714735","text":"\"\"\"\nGiven an array of non-negative integers, you are initially positioned at the first index of the array.\n\nEach element in the array represents your maximum jump length at that position.\n\nYour goal is to reach the last index in the minimum number of jumps.\n\nExample:\n\nInput: [2,3,1,1,4]\nOutput: 2\nExplanation: The minimum number of jumps to reach the last index is 2.\n Jump 1 step from index 0 to 1, then 3 steps to the last index.\nNote:\n\nYou can assume that you can always reach the last index.\n\"\"\"\nfrom typing import List\n\n\ndef jump(nums: List[int]) -> int:\n num_hops = 0\n curr_idx = 0\n last_idx = len(nums) - 1\n while curr_idx != last_idx:\n best_idx = curr_idx + 1\n best_val = nums[best_idx] + best_idx\n for next_idx in range(nums[curr_idx] + curr_idx, curr_idx, -1):\n if next_idx >= last_idx:\n best_idx = last_idx\n break\n if next_idx + nums[next_idx] > best_val:\n best_idx = next_idx\n best_val = next_idx + nums[next_idx]\n curr_idx = best_idx\n num_hops += 1\n return num_hops\n","repo_name":"bensenberner/ctci","sub_path":"hard/jump_game_ii_45.py","file_name":"jump_game_ii_45.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"41494855277","text":"#!/usr/bin/python\n\nimport json\n\ndata = {\n \"germ_data_request_summary\": {\n \"category\": [\n {\n \"|code\": \"433\",\n \"|terminology\": \"openehr\",\n \"|value\": \"event\"\n }\n ],\n \"composer\": [\n {\n \"|name\": \"OpenEHR-Perl-STRUCTURED\"\n }\n ],\n }\n}\n\nclass InformationOrder():\n \"\"\"\n Holds InformationOrder data\n \"\"\"\n\n class category:\n code = ''\n value = ''\n terminology = ''\n composition = ''\n\n lookup = {\n '433' : {'|code' : '433', '|value' : 'event1', '|terminology' : 'openehr' },\n '434' : {'|code' : '434', '|value' : 'event2', '|terminology' : 'openehr' },\n '435' : {'|code' : '435', '|value' : 'event3', '|terminology' : 'openehr' },\n '436' : {'|code' : '436', '|value' : 'event4', '|terminology' : 'openehr' },\n }\n\n def __init__(self, code):\n self.code = self.lookup[code]['|code']\n self.value = self.lookup[code]['|value']\n self.terminology = self.lookup[code]['|terminology']\n self.composition = self.lookup[code]\n\n def compose(self):\n self.composition = self.lookup[self.code]\n return self.composition\n\n def set_code(self, code):\n self.__init__(code)\n\n class composer:\n name = ''\n composition = ''\n\n def __init__(self, data):\n 
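# store both the raw name and the fragment used when composing output\n            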
self.name = data\n            self.composition = { '|name' : data }\n\n        def compose(self):\n            return self.composition\n\n        def set_name(self, data):\n            self.__init__(data)\n\n    composition = {}\n\n    document_root = 'gel_data_request_summary'\n    uids = []\n    categories = [ category('433') ]\n    composers = [ composer('OpenEHR-Python') ]\n\n    def set_item(self, item_name, object_name, data ):\n        if data:\n            items = []\n            for item in data:\n                items.append( object_name(item) )\n            setattr(self, item_name, items)\n\n    def set_category(self, data):\n        \"\"\"\n        Category data should be provided as an\n        array of strings with each string being a valid\n        category code\n        \"\"\"\n        if data:\n            self.categories = []\n            for item in data:\n                self.categories.append( self.category(item) )\n\n    def set_composer(self, data):\n        \"\"\"\n        Composer data should be provided as an array of name strings.\n        Default value is set to the name of the generating class function\n        \"\"\"\n        if data:\n            self.composers = []\n            for item in data:\n                self.composers.append( self.composer(item) )\n\n    def write_document_root(self):\n        self.composition[self.document_root] = {}\n\n    def write_category(self):\n        self.composition[self.document_root]['category'] = []\n        for category in self.categories:\n            self.composition[self.document_root]['category'].append(category.composition)\n\n    def write_composer(self):\n        self.composition[self.document_root]['composer'] = []\n        for composer in self.composers:\n            self.composition[self.document_root]['composer'].append(composer.composition)\n\n    def write(self):\n        self.write_document_root()\n        self.write_category()\n        self.write_composer()\n\n\n    def read(self, data):\n        def read_categories(data):\n            categories = []\n            for item in data:\n                item_obj = self.category( item['|code'])\n                categories.append(item_obj)\n            return categories\n\n        def read_composers(data):\n            composers = []\n            for item in data:\n                item_obj = self.composer( item['|name'])\n                composers.append(item_obj)\n            return composers\n\n\n        self.document_root = list(data.keys())[0]\n        self.categories = read_categories( data[self.document_root]['category'] )\n        self.composers = read_composers( data[self.document_root]['composer'] )\n\n\ncompos1 = InformationOrder()\n#compos1.set_category(['433', '434'])\ncompos1.set_item('categories', compos1.category, ['433', '434'])\ncompos1.set_composer(['David Ramlakhan', 'John Duncan'])\ncompos1.write()\nprint('Compos1:', json.dumps(compos1.composition, indent=4))\n\n#compos2 = InformationOrder()\n#compos2.read(data)\n#composition = compos2.write()\n#print('Compos2:', json.dumps(composition, indent=4) )\n","repo_name":"dram1964/OpenEHR-Py","sub_path":"app/information_order2.py","file_name":"information_order2.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} {"seq_id":"15502805528","text":"AGENDA = {}\n\n# Functions\ndef mostrar_contatos():\n    if AGENDA:\n        for contato in AGENDA:\n            buscar_contato(contato)\n    else:\n        print('>>>>>> Agenda vazia')\n\ndef buscar_contato(contato):\n    try:\n        print('Nome:', contato)\n        print('Telefone:', AGENDA[contato]['telefone'])\n        print('Endereco:', AGENDA[contato]['endereco'])\n        print('Email:', AGENDA[contato]['email'])\n        print('---------------------------------------')\n    except KeyError:\n        print('>>>>>> Contato {} nao encontrado'.format(contato))\n    except Exception as error:\n        print('>>>>>> Um erro inesperado ocorreu: ', error) \n\ndef ler_detalhes_contato():\n    telefone = input('Digite o telefone do contato: ')\n    email = input('Digite o email do contato: ')\n    endereco = 
input('Digite o endereco do contato: ')\n return telefone, email, endereco\n \n\ndef incluir_editar_contato(contato, telefone, endereco, email):\n AGENDA[contato] = {\n 'telefone': telefone,\n 'endereco': endereco,\n 'email': email,\n }\n salvar()\n print('>>>>>> Contato {} adicionado/editado com sucesso!'.format(contato))\n\ndef excluir_contato(contato):\n try:\n AGENDA.pop(contato)\n salvar()\n print()\n print('>>>>>> Contato {} excluido com sucesso!'.format(contato))\n print()\n except KeyError:\n print('>>>>>> Contato {} nao encontrado!'.format(contato)) \n except Exception as error:\n print('>>>>>> Um erro inesperado ocorreu: ', error)\n\ndef exportar_contatos(nome_do_arquivo):\n try:\n with open(nome_do_arquivo, 'w') as arquivo:\n for contato in AGENDA:\n telefone = AGENDA[contato]['telefone']\n endereco = AGENDA[contato]['endereco']\n email = AGENDA[contato]['email']\n\n arquivo.write('{},{},{},{}\\n'.format(contato, telefone, endereco, email)) \n print('>>>>>> Agenda exportada com sucesso!')\n except Exception as error:\n print('>>>>>> Um erro inesperado ocorreu: ', error)\n\ndef importar_contatos(nome_do_arquivo):\n try:\n with open(nome_do_arquivo, 'r') as arquivo:\n linhas = arquivo.readlines()\n for linha in linhas:\n detalhes = linha.strip().split(',')\n\n nome = detalhes[0]\n telefone = detalhes[1]\n endereco = detalhes[2] # the CSV is written as nome,telefone,endereco,email\n email = detalhes[3]\n\n incluir_editar_contato(nome, telefone, endereco, email)\n except FileNotFoundError:\n print('>>>>>> Arquivo nao encontrado!')\n except Exception as error:\n print('>>>>>> Um erro inesperado ocorreu: ', error) \n\ndef salvar():\n exportar_contatos('database.csv')\n\ndef carregar():\n try:\n with open('database.csv', 'r') as arquivo:\n linhas = arquivo.readlines()\n for linha in linhas:\n detalhes = linha.strip().split(',')\n\n nome = detalhes[0]\n telefone = detalhes[1]\n endereco = detalhes[2] # same column order used by exportar_contatos\n email = detalhes[3]\n\n AGENDA[nome] = {\n 'telefone': telefone,\n 'endereco': endereco,\n 'email': email,\n }\n print('>>>>>> Database carregada com sucesso! 
\\n {} contatos carregados'.format(len(AGENDA))) \n except FileNotFoundError:\n print('>>>>>> Arquivo nao encontrado!')\n except Exception as error:\n print('>>>>>> Um erro inesperado ocorreu: ', error)\n\ndef imprimir_menu():\n print('------------------------------------')\n print('1 - Mostrar todos os contatos')\n print('2 - Buscar contato')\n print('3 - Incluir contato')\n print('4 - Editar contato')\n print('5 - Excluir contato')\n print('6 - Exportar contatos para CSV')\n print('7 - Importar contatos CSV')\n print('0 - Fechar agenda')\n print('------------------------------------')\n\n# Inicio do programa\ncarregar()\nwhile True:\n imprimir_menu()\n\n opcao = input('Digite uma opcao: ')\n if opcao == '1':\n mostrar_contatos()\n elif opcao == '2':\n contato = input('Digite o nome do contato: ')\n buscar_contato(contato)\n elif opcao == '3':\n contato = input('Digite o nome do contato: ')\n\n try:\n AGENDA[contato]\n print('>>>>>> Contato já existente')\n except KeyError: \n telefone, email, endereco = ler_detalhes_contato()\n incluir_editar_contato(contato, telefone, endereco, email) # match the (contato, telefone, endereco, email) signature\n elif opcao == '4':\n contato = input('Digite o nome do contato: ')\n\n try:\n AGENDA[contato]\n print('>>>>>> Editando contato', contato)\n telefone, email, endereco = ler_detalhes_contato()\n incluir_editar_contato(contato, telefone, endereco, email)\n except KeyError: \n print('>>>>>> Contato nao existente')\n elif opcao == '5':\n contato = input('Digite o nome do contato: ')\n excluir_contato(contato)\n elif opcao == '6':\n nome_do_arquivo = input('Digite o nome do arquivo a ser exportado: ')\n exportar_contatos(nome_do_arquivo)\n elif opcao == '7':\n nome_do_arquivo = input('Digite o nome do arquivo a ser importado: ')\n importar_contatos(nome_do_arquivo)\n elif opcao == '0':\n print('>>>>>> Agenda fechada')\n break \n else:\n print('>>>>>> Opcao invalida')\n","repo_name":"sousa2323/agenda","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"10658282428","text":"import os\nfrom config import ttable_url\n\n\ndef show_list_timetable():\n list_ttables = os.listdir('Timetable')\n dict_ttables = {}\n for _, v in enumerate(list_ttables):\n # print(f\"{v[:5]} {v.split('.')[0][6:]}\")\n dict_ttables[v[:5]] = v.split('.')[0][6:]\n # print(dict_ttables)\n return dict_ttables\n\n\n\ndef get_timetable(number_PK):\n tt_list = os.listdir('Timetable')\n # print(tt_list[0].split()[0])\n for _, v in enumerate(tt_list):\n if v.split()[0] == number_PK:\n # print(number_PK)\n tt_item = v.split('.')[0]\n tt_url = f'{ttable_url}/{v}'\n # print(tt_item, tt_url)\n return [tt_item, tt_url]\n\n\n # print(f\"Ваше ПК: {tt_item}\\n\\n\"\n # f\"Ваше расписание: {tt_url} (скачать)\\n\")\n\n\ndef main():\n show_list_timetable()\n # get_timetable(input('Выберите номер Вашего ПК: '))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"EvgeniiPlus/tgBot_GrOIRO","sub_path":"timetable.py","file_name":"timetable.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43006172597","text":"def sumofn(n):\n a=[]\n s=0\n a.append(0)\n a.append(1)\n if(n==0):\n return a[0]\n elif(n==1):\n return a[1]\n else:\n for i in range(2,n+1):\n a.append(a[i-1]+a[i-2])\n s=sum(a)\n return 
s\nn=int(input())\nn%=60\nprint(sumofn(n)%10)","repo_name":"navneetsn18/Coursera-Algorithmic-Toolbox","sub_path":"Solution Week 2/program 6.py","file_name":"program 6.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"9180218771","text":"from rest_framework import generics\nfrom rest_framework.response import Response\nfrom swachh_toilets.models import SwachhToilet\nfrom .authentication import CustomAuthentication\nfrom swachh_toilets.api.serializers import ToiletSerializer\nfrom .pagination import StandardResultsSetPageNumberPagination\n\n\nclass ToiletList(generics.ListAPIView):\n authentication_classes = (CustomAuthentication,)\n serializer_class = ToiletSerializer\n pagination_class = StandardResultsSetPageNumberPagination\n\n def get_queryset(self):\n coordinates = [\n float(self.request.GET.get('longitude')),\n float(self.request.GET.get('latitude'))\n ]\n\n \"\"\"\n from .authentication import Profile\n profile = Profile().get_user_profile(\n self.request,\n self.request.user['id']\n )\n coordinates = profile['location']['coordinates']\n \"\"\"\n queryset = SwachhToilet.objects(\n location__near=coordinates,\n location__max_distance=1000\n )\n q = self.request.query_params.get('q', None)\n if q is not None:\n queryset = queryset.filter(qci_id__icontains=q)\n return queryset\n\n def list(self, request):\n queryset = self.filter_queryset(self.get_queryset())\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n # result = [ x.values()[0] for x in serializer.data ]\n return Response(serializer.data)\n\n\nclass ToiletDetail(generics.RetrieveAPIView):\n lookup_field = 'id'\n serializer_class = ToiletSerializer\n queryset = SwachhToilet.objects.all()\n","repo_name":"samayamnag/swachh-toilets","sub_path":"swachh_toilets/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72230595768","text":"import tempfile\nimport unittest\n\nimport aiostripe\nfrom aiostripe.test.helper import StripeResourceTest\n\n\nclass FileUploadTest(StripeResourceTest):\n async def test_create_file_upload(self):\n test_file = tempfile.TemporaryFile()\n\n await aiostripe.FileUpload.create(\n purpose='dispute_evidence',\n file=test_file\n )\n\n self.requestor_mock.request.assert_called_with('post', '/v1/files',\n params={\n 'purpose': 'dispute_evidence',\n 'file': test_file\n },\n headers={'Content-Type': 'multipart/form-data'})\n\n async def test_fetch_file_upload(self):\n await aiostripe.FileUpload.retrieve('fil_foo')\n\n self.requestor_mock.request.assert_called_with('get', '/v1/files/fil_foo',\n {}, None)\n\n async def test_list_file_uploads(self):\n await aiostripe.FileUpload.list()\n\n self.requestor_mock.request.assert_called_with('get', '/v1/files',\n {})\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cypreess/aiostripe","sub_path":"aiostripe/test/resources/test_file_uploads.py","file_name":"test_file_uploads.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21256946982","text":"import re, os\r\n\r\nAreaCodeRegex = re.compile(r'\\d\\d-\\d\\d\\d')\r\n\r\nAreaCodes=[]\r\n\r\nfor 
filename in os.listdir():\r\n if filename.endswith('txt'):\r\n with open(filename) as file: # context manager closes the handle instead of rebinding and leaking it\r\n contents = file.read()\r\n mo = AreaCodeRegex.findall(contents)\r\n AreaCodes.append(mo)\r\n\r\nprint(AreaCodes)\r\n \r\n\r\n","repo_name":"martgrz/Automate-the-Boring-Stuff-with-Python-Projects-Solutions","sub_path":"Chapter 8 Regex Search.py","file_name":"Chapter 8 Regex Search.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42845447836","text":"import multiprocessing\nimport sys\nimport os\nfrom glob import glob\nfrom functools import partial\nsys.path.append(os.getcwd())\nimport fire\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ndef find_files_with_ext(search_folder, exts=['.JPG', '.jpg', '.png']):\n all_files = glob(search_folder + '**/**', recursive=True)\n bag = []\n if exts:\n for _ext in exts:\n bag += [file for file in all_files if file.endswith(_ext)]\n else:\n bag = all_files\n return bag\n\ndef get_all_input_files(source_dir, input_files_types=['.JPG', '.jpg', '.png']):\n \"\"\"Get the list of images files from the source directory\"\"\"\n return find_files_with_ext(source_dir, input_files_types)\n\ndef crop_and_save(cords, image, file_path):\n (x1, x2, y1, y2) = cords\n cropped_image = image[y1:y2, x1:x2]\n dest_file = file_path + \".png\"\n try:\n plt.imsave(dest_file, cropped_image, cmap='Greys_r')\n # print('Saved file to {}'.format(dest_file))\n except Exception: # avoid a bare except that would also swallow KeyboardInterrupt\n print('>>>>>>>>>>>>>> Missed file to {}'.format(dest_file))\n\n\ndef crop_to_box(image_file_path, destination_dir, text_file_ext):\n try:\n gt_text_file_path = image_file_path.replace(\".jpg\", \".txt\")\n source_image_path = image_file_path\n file_name = source_image_path.split(\"/\")[-1].split(\".\")[0]\n if not os.path.exists(destination_dir):\n os.makedirs(destination_dir)\n # Open the text file and get all the coordinates\n with open(gt_text_file_path) as gt_txt_file_pointer:\n count = 0\n coords_data = []\n for line in gt_txt_file_pointer:\n coords_data.append(line.strip().split(\",\"))\n for gt_txt_line in coords_data:\n try:\n # print(gt_txt_line)\n jpgfile = plt.imread(source_image_path)\n # naming convention for the file\n out_file_path = destination_dir +\"/\" + file_name + \"_\" + str(count)\n\n # x1, y1, _, _, x2, y2, _, _, text = gt_txt_line\n x1 = gt_txt_line[0]\n y1 = gt_txt_line[1]\n x2 = gt_txt_line[4]\n y2 = gt_txt_line[5]\n text = \"\".join(gt_txt_line[8:])\n # call fun with cords and images named convention for the cropped image\n crop_and_save((int(x1), int(x2), int(y1), int(y2)), jpgfile, out_file_path) # (int(x1)-11, int(x2)+11, int(y1)-4, int(y2)+4\n count = count + 1\n with open(out_file_path+text_file_ext, \"w\") as fd:\n fd.write(text)\n except FileNotFoundError as fnf_error:\n print(\"error\", fnf_error)\n except Exception as e:\n print(e)\n print(image_file_path)\n\n\ndef prepare_calamari_dataset_from_icdar(in_path, out_path=\"cropped\", text_file_ext=\".gt.txt\"):\n in_files = get_all_input_files(source_dir=in_path)\n\n # pool = multiprocess.Pool()\n # for file in tqdm(in_files):\n # print(file)\n # crop_to_box(image_file_path=file, destination_dir=\"cropped\")\n\n crop_to_box_partial = partial(crop_to_box, destination_dir=out_path, text_file_ext=text_file_ext)\n\n with multiprocessing.Pool() as p:\n r = list(tqdm(p.imap(crop_to_box_partial, in_files), total=len(in_files)))\n # p.map(crop_to_box, in_files)\n # map list to target function\n # pool.map(task, multiprocess_list)\n\n p.close()\n 
p.join()\n\nif __name__ == '__main__':\n fire.Fire(prepare_calamari_dataset_from_icdar)","repo_name":"dhiraa/hyp-demo","sub_path":"icdar/calamari_dataset.py","file_name":"calamari_dataset.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"20503399927","text":"#! /usr/bin/env /usr/bin/python\n\n\"\"\"\n This program demonstrates how threading, multiprocessing and sequential calls perform for\nI/O bound tasks.\n\nEx:\n python performanceLOCALIO.py threading 15 localhost 25000\n python performanceLOCALIO.py sequential 15 localhost 25000\n python performanceLOCALIO.py processing 15 localhost 25000\n--------------------\nBefore running this program, start the server program: \"perfserver.py\"\n\"\"\"\n\nimport time\nimport sys\nimport threading\nimport multiprocessing\nfrom socket import *\n\ndef opener(*args):\n conn = create_connection(('localhost', 25000))\n while True:\n buf = conn.recv(40)\n if not buf:\n break\n sys.stdout.buffer.write(buf) # recv() returns bytes, so write to the binary buffer\n\n\n\nif __name__ == '__main__':\n\n if sys.argv[1] == \"sequential\":\n start = time.time()\n for i in range(int(sys.argv[2])):\n opener(sys.argv[3:])\n print(\"sequential took {0} sec\".format(time.time()-start))\n elif sys.argv[1] == \"threading\":\n threads = []\n\n for i in range(int(sys.argv[2])):\n threads.append(threading.Thread(target=opener, args=(sys.argv[3:],)))\n\n start = time.time()\n for trd in threads:\n trd.start()\n\n for trd in threads:\n trd.join()\n print(\"threading took {0} sec\".format(time.time()-start))\n elif sys.argv[1] == \"processing\":\n procs = []\n\n for i in range(int(sys.argv[2])):\n procs.append(multiprocessing.Process(target=opener, args=(sys.argv[3:],)))\n\n start = time.time()\n for proc in procs:\n proc.start()\n\n for proc in procs:\n proc.join()\n print(\"multiprocessing took {0} sec\".format(time.time()-start))\n","repo_name":"uday4a9/python","sub_path":"socket/performanceLOCALIO.py","file_name":"performanceLOCALIO.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"6468416324","text":"import socket\n# we first define the IP and port which are necessary arguments while creating a server or client socket\nHOST = \"192.168.56.1\"\nPORT = 9090\n# then we create the socket. 
This is done for both client and server sockets\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# then we specify that the socket is a server socket using the following\nserver_socket.bind((HOST, PORT))\n# we can/should also specify the number of queued messages that can be kept waiting\nserver_socket.listen(5)\n# we have now successfully created a server socket above that's binded to a specific HOST and PORT\n# we can now proceed to make it available to accept messages from clients\n\nwhile True:\n connection_socket, address = server_socket.accept()\n # the accept() method produces a socket for accepting client messages and the address of the client\n print(f\"We have successfully connected to {address}\") # just to show that the connection took place\n # now we need to make the connection_socket receive messages a message from the client using the recv() method\n message = connection_socket.recv(1024).decode(\"utf-8\")\n # we have now created a message variable that stores the received message that comes with a maximum of 1024bytes,\n # and the bytes received are further decoded to readable string as a utf-8\n print(f\"The message received by the client is: \\n {message}\")\n # after receiving the message from the client, we would like to send a confirmation message to the client\n reply = input(\"Enter the reply message:\\n\")\n connection_socket.send(f\"{reply}\".encode(\"utf-8\"))\n print(\"The connection has ended after sending a confirmation message\")\n connection_socket.close()\n","repo_name":"ramogi4960/Hackerrank-challenges","sub_path":"Socket Programming/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"42155591838","text":"from numpy import sin, cos, radians\n\nclass RotatingCamera:\n\tdef __init__(self, CAMRAD, mouse):\n\t\tpi3d=self.getpi3d()\n\t\tself.CAMERA = pi3d.Camera()\n\t\tself.CAMRAD = CAMRAD\n\t\tself.mouserot=0.0\n\t\tself.tilt=15.0\n\t\tself.frame=0\n\t\tself.omx,self.omy=mouse.position()\n\t\t\n\tdef update(self,mouse):\n\t\tself.mx,self.my= mouse.position()\n\t\tself.mouserot -= (self.mx - self.omx) * 0.2\n\t\tself.tilt -= (self.my - self.omy) * 0.1\n\t\tself.omx=self.mx\n\t\tself.omy=self.my\n\t\tself.CAMERA.reset()\n\t\tFIXED=True\n\t\tif(FIXED):\n\t\t\tself.CAMERA.rotate(0,0,0)\n\t\t\tself.CAMERA.position((0,0,-10))\n\t\telse:\n\t\t\tself.CAMERA.rotate(-self.tilt,self.mouserot,0)\n\t\t\tself.CAMERA.position((self.CAMRAD * sin(radians(self.mouserot)) * cos(radians(self.tilt)),\n\t\t\t\t\t\t\t\t self.CAMRAD * sin(radians(self.tilt)),\n\t\t\t\t\t\t\t\t -self.CAMRAD * cos(radians(self.mouserot)) * cos(radians(self.tilt))))\n\n\t@staticmethod\n\tdef getpi3d():\n\t\timport sys\n\t\tsys.path.insert(1, '/home/pi/pi3d')\n\t\timport pi3d\n\t\treturn pi3d\n","repo_name":"scottalmond/EscapeRoom","sub_path":"testing/rotatingCamera.py","file_name":"rotatingCamera.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"74211980728","text":"#%%\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow import keras\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nfrom keras import layers\nfrom copy import deepcopy\n\ndf = pd.read_csv('MSFT.csv')\ndf = df[['Date', 'Close']]\n\ndef str_to_datetime(s):\n split = s.split('-')\n year, month, day = 
int(split[0]), int(split[1]), int(split[2])\n return datetime.datetime(year=year, month=month, day=day)\n\ndf['Date'] = df['Date'].apply(str_to_datetime) # Apply the function to df['Date'] variable\ndf.index = df.pop('Date') # Replace index with date\n\n# plt.plot(df.index, df['Close'])\n\ndef df_to_windowed_df(dataframe, first_date_str, last_date_str, n=3):\n first_date = str_to_datetime(first_date_str)\n last_date = str_to_datetime(last_date_str)\n\n # target date for prediction \n target_date = first_date\n \n dates = []\n X, Y = [], []\n\n last_time = False\n while True:\n df_subset = dataframe.loc[:target_date].tail(n+1)\n \n if len(df_subset) != n+1:\n print(f'Error: Window of size {n} is too large for date {target_date}')\n return\n\n values = df_subset['Close'].to_numpy()\n x, y = values[:-1], values[-1]\n\n dates.append(target_date)\n X.append(x)\n Y.append(y)\n\n next_week = dataframe.loc[target_date:target_date+datetime.timedelta(days=7)]\n next_datetime_str = str(next_week.head(2).tail(1).index.values[0])\n next_date_str = next_datetime_str.split('T')[0]\n year_month_day = next_date_str.split('-')\n year, month, day = year_month_day\n next_date = datetime.datetime(day=int(day), month=int(month), year=int(year))\n \n if last_time:\n break\n \n target_date = next_date\n\n if target_date == last_date:\n last_time = True\n \n ret_df = pd.DataFrame({})\n ret_df['Target Date'] = dates\n \n X = np.array(X)\n for i in range(0, n):\n X[:, i]\n ret_df[f'Target-{n-i}'] = X[:, i]\n \n ret_df['Target'] = Y\n\n return ret_df\n\n# Start day second time around: '2021-03-25'\nwindowed_df = df_to_windowed_df(df, \n '2021-03-25', \n '2022-03-23', \n n=3)\n\n# Convert into numpy array to fit into tensorflow model\ndef windowed_df_to_date_X_y(windowed_dataframe):\n df_as_np = windowed_dataframe.to_numpy()\n \n dates = df_as_np[:, 0]\n # Values of Target-3 ~ Target-1\n middle_matrix = df_as_np[:, 1:-1]\n X = middle_matrix.reshape((len(dates), middle_matrix.shape[1], 1))\n \n Y = df_as_np[:, -1]\n \n return dates, X.astype(np.float32), Y.astype(np.float32)\n\ndates, X, y = windowed_df_to_date_X_y(windowed_df)\n\n# Training data\nq_80 = int(len(dates) * .8)\nq_90 = int(len(dates) * .9)\ndates_train, X_train, y_train = dates[:q_80], X[:q_80], y[:q_80] # Training data: 0~80% (80%)\ndates_val, X_val, y_val = dates[q_80:q_90], X[q_80:q_90], y[q_80:q_90] # Validation data: 80~90% (10%)\ndates_test, X_test, y_test = dates[q_90:], X[q_90:], y[q_90:] # Testing data: 90~100% (10%)\n\n\"\"\"\nplt.plot(dates_train, y_train)\nplt.plot(dates_val, y_val)\nplt.plot(dates_test, y_test)\n\nplt.legend(['Train', 'Validation', 'Test'])\n\"\"\"\n\n# Train the model\nmodel = Sequential([layers.Input((3, 1)), # Input: 3 past days, 1 feature\n layers.LSTM(64), # Arbitrary value of neuron\n layers.Dense(32, activation='relu'), \n layers.Dense(32, activation='relu'),\n layers.Dense(1)])\n\nmodel.compile(loss='mse', # Loss function to minimize: mean squared error\n optimizer=Adam(learning_rate=0.001),\n metrics=['mean_absolute_error']) # Average error\n\nmodel.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=100)\n\n# Prediction model with training data\ntrain_predictions = model.predict(X_train).flatten()\n# Prediction model with validation data\nval_predictions = model.predict(X_val).flatten()\n# Prediction model with testing data\ntest_predictions = model.predict(X_test).flatten()\n\n# Plot\nplt.plot(dates_train, train_predictions) # Predicted data using past 3 days\nplt.plot(dates_train, y_train) # Actual 
observation\nplt.plot(dates_val, val_predictions)\nplt.plot(dates_val, y_val)\nplt.plot(dates_test, test_predictions)\nplt.plot(dates_test, y_test)\nplt.legend(['Training Predictions', \n 'Training Observations',\n 'Validation Predictions', \n 'Validation Observations',\n 'Testing Predictions', \n 'Testing Observations'])\n# %%\nrecursive_predictions = []\nrecursive_dates = np.concatenate([dates_val, dates_test])\n\nfor target_date in recursive_dates:\n last_window = deepcopy(X_train[-1])\n next_prediction = model.predict(np.array([last_window])).flatten()\n recursive_predictions.append(next_prediction)\n last_window[-1] = next_prediction\n \nplt.plot(dates_train, train_predictions)\nplt.plot(dates_train, y_train)\nplt.plot(dates_val, val_predictions)\nplt.plot(dates_val, y_val)\nplt.plot(dates_test, test_predictions)\nplt.plot(dates_test, y_test)\nplt.plot(recursive_dates, recursive_predictions)\nplt.legend(['Training Predictions', \n 'Training Observations',\n 'Validation Predictions', \n 'Validation Observations',\n 'Testing Predictions', \n 'Testing Observations',\n 'Recursive Predictions'])","repo_name":"hyunji0618/StockPredictionPy","sub_path":"forecast.py","file_name":"forecast.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8367693580","text":"import threading\nimport tkinter as tk\nfrom tkinter import ttk\n\nfrom PIL import ImageTk, Image, ImageEnhance\n\n\nclass LoadModelWin(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n\n self.title(\"Tree classifier\")\n self.lift()\n self.eval('tk::PlaceWindow . center')\n\n self.master_frame = tk.Frame(self)\n self.master_frame.rowconfigure(0, minsize=80, weight=1)\n self.master_frame.columnconfigure([0, 1, 2], minsize=80, weight=1)\n self.y_pad = 10\n\n self.frame = tk.Frame(master=self.master_frame, relief=tk.RAISED)\n self.frame.grid(row=0, column=1, sticky=\"\")\n\n self.label = tk.Label(master=self.frame, text=\"Loading models, please wait.\")\n self.label.grid(row=4, column=1, pady=self.y_pad)\n\n self.progress_bar = ttk.Progressbar(master=self.frame, orient=\"horizontal\", length=150, mode=\"indeterminate\")\n self.progress_bar.start(10)\n self.progress_bar.grid(row=5, column=1, pady=10)\n\n self.frame.grid()\n self.master_frame.grid()\n\n\nclass ZoomedOutWin(tk.Toplevel):\n\n def __init__(self, master, pil_img):\n tk.Toplevel.__init__(self, master=master)\n self.resizable(False, False)\n self.title(\"Zoomed out image - Click to close\")\n\n width, height = pil_img.size\n\n self.geometry(str(width) + \"x\" + str(height))\n\n tk_img = ImageTk.PhotoImage(pil_img)\n img = tk.Label(self, image=tk_img)\n img.image = tk_img\n\n btn0 = tk.Button(self, image=tk_img, command=self._close_window)\n btn0.image = tk_img\n btn0.place(x=0, y=0)\n\n def _close_window(self):\n self.destroy()\n\n\nclass TreeWin(tk.Tk):\n\n def __init__(self, class_name, tree_list):\n tk.Tk.__init__(self)\n\n self.tree_list = tree_list\n self.class_name = class_name\n self.remove_count = 0\n\n self.img_size = 200\n\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n self.widget_list = []\n self.lift()\n\n self.master_frame = tk.Frame(self)\n self.master_frame.grid(sticky=tk.NSEW)\n self.master_frame.columnconfigure(0, weight=1)\n self.master_frame.rowconfigure(0, weight=1)\n\n self.label = tk.Label(master=self.master_frame, text=\"Remove trees that are not: \"\n + class_name + \" | Images left:\"\n + str(len(self.tree_list)))\n 
self.label.config(font=(\"\", 30))\n self.label.grid(row=0, column=0)\n\n # Create a frame for the canvas and scrollbar(s).\n self.scrollbar_frame = tk.Frame(self.master_frame)\n self.scrollbar_frame.grid(column=0, sticky=tk.NSEW)\n self.scrollbar_frame.columnconfigure(0, weight=1)\n self.scrollbar_frame.rowconfigure(0, weight=1)\n\n # Add a canvas in that frame.\n self.canvas = tk.Canvas(self.scrollbar_frame)\n self.canvas.grid(sticky=tk.NSEW)\n self.canvas.columnconfigure([0, 1, 2], weight=1)\n\n # Create a vertical scrollbar linked to the canvas.\n self.scrollbar = tk.Scrollbar(self.scrollbar_frame, orient=tk.VERTICAL, command=self.canvas.yview)\n self.scrollbar.grid(row=0, column=2, sticky=tk.NSEW)\n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n\n self.buttons_frame = tk.Frame(self.canvas, bd=160)\n self._load_images()\n\n self.canvas.create_window((0, 0), window=self.buttons_frame, anchor=tk.NW)\n\n # Needed to make bbox info available.\n self.buttons_frame.update_idletasks()\n\n # Get bounding box of canvas with Buttons.\n self.bbox = self.canvas.bbox(tk.ALL)\n self.canvas.configure(scrollregion=self.bbox,\n width=self.winfo_screenwidth(),\n height=self.winfo_screenheight() - 100)\n\n def _zoom_out(self, tree):\n \"\"\"\n Create a zoomed out window for a selected tree.\n The tree will also be highlighted for easy identification.\n @param tree: Tree to be zoomed out.\n \"\"\"\n\n image_path = tree[0]\n tree_data = tree[1]\n x_min, y_min, x_max, y_max = tree_data[0], tree_data[1], tree_data[2], tree_data[3]\n\n # Open image and mark the tree.\n main_pil_image = Image.open(image_path)\n tree_img = main_pil_image.crop((x_min, y_min, x_max, y_max))\n main_pil_image = ImageEnhance.Brightness(main_pil_image).enhance(0.5)\n main_pil_image.paste(tree_img, (x_min, y_min, x_max, y_max))\n\n width, height = main_pil_image.size\n\n enlarge = 200\n plus_x = 0\n plus_y = 0\n\n # X-corrections\n if x_min - enlarge < 0:\n plus_x = enlarge - x_min\n x_min = 0\n else:\n x_min -= 200\n\n if x_max + enlarge > width:\n minus_x = (x_max + enlarge) - width\n x_max = width + plus_x\n x_min -= minus_x\n else:\n x_max += enlarge + plus_x\n\n # Y-corrections.\n if y_min - enlarge < 0:\n plus_y = enlarge - y_min\n y_min = 0\n else:\n y_min -= enlarge\n\n if y_max + enlarge > height:\n minus_y = (y_max + enlarge) - height\n y_max = height + plus_y\n y_min -= minus_y\n else:\n y_max += enlarge + plus_y\n\n # Create a zoomed out square image.\n zoom_out_img = main_pil_image.crop((x_min, y_min, x_max, y_max)).resize((500, 500))\n ZoomedOutWin(master=self, pil_img=zoom_out_img)\n\n def _load_images(self):\n \"\"\"\n Loads all the images into the grid and adds the logic for removal\n of it graphically and from the tree_list.\n \"\"\"\n self.buttons_frame.grid_forget()\n\n row_index = 1\n column_index = 0\n total_trees = 0\n btn_count = 0\n btn_list = []\n\n pil_img_cache = dict()\n\n for row in self.tree_list:\n\n image_path = row[0]\n tree_row = row[1]\n accuracy = tree_row[5]\n\n # Skip de-classed trees.\n if tree_row[4] != self.class_name:\n continue\n\n # Add open pil-image to cache for loading.\n if image_path not in pil_img_cache:\n pil_img_cache[image_path] = Image.open(image_path)\n\n min_x, min_y, max_x, max_y = tree_row[0], tree_row[1], tree_row[2], tree_row[3]\n\n # Image.\n main_pil_image = pil_img_cache[image_path]\n pil_img = main_pil_image.crop((min_x, min_y, max_x, max_y)).resize((self.img_size, self.img_size))\n img = ImageTk.PhotoImage(pil_img)\n img_frame = 
tk.Frame(self.buttons_frame)\n img_frame.grid(row=row_index, column=column_index, sticky=tk.NSEW, padx=0, pady=10)\n\n # Button.\n button = tk.Button(img_frame,\n relief=tk.RIDGE,\n image=img,\n width=self.img_size,\n height=self.img_size,\n command=lambda index=btn_count: self._remove_class(self.tree_list[index]))\n button.image = img\n button.grid(row=0, column=0, sticky=tk.NSEW)\n text = tk.Label(master=img_frame, text=accuracy)\n text.grid(row=1)\n\n img_button = tk.Button(img_frame, text=\"Zoom out\",\n command=lambda index=btn_count: self._zoom_out(self.tree_list[index]))\n img_button.grid(row=2)\n\n btn_list.append(img_button)\n\n column_index += 1\n btn_count += 1\n self.widget_list.append(img_frame)\n\n total_trees += 1\n\n # Rows with 8 trees.\n if column_index % 8 == 0:\n column_index = 0\n row_index += 1\n\n if total_trees == 16:\n break\n\n def _remove_class(self, tree):\n \"\"\"\n Removes the class from a selected tree and simply labels it as a \"tree\".\n Also updates the tree count in the window.\n\n Format of tree: ['img_path', [0, 392, 50, 479, 'Birch', 75.61432123184204]]\n\n @param tree: Selected tree to be labeled as \"tree\".\n \"\"\"\n\n tree[1][4] = \"Tree\"\n\n self.tree_list.remove(tree)\n self.label.config(text=\"Remove trees that are not: \"\n + self.class_name + \" | Images left:\"\n + str(len(self.tree_list)))\n\n # Close window when empty.\n if len(self.tree_list) == 0:\n self.destroy()\n return\n\n for widget in self.buttons_frame.winfo_children():\n widget.grid_forget()\n\n self._load_images()\n","repo_name":"jonaselmesten/forest-classifier","sub_path":"gui/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":8810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"30139208627","text":"import socket\nfrom _thread import *\nimport threading\nimport hashlib\nimport os\nimport time\nfrom datetime import date\nfrom datetime import datetime\n\nbuff_size = 1024\n\nport = 8080\n\nconexiones_esperadas = 1\n\nhost = '0.0.0.0'\n\nprint_lock = threading.Lock()\n\nesperando = True\n\narch1 = 'arch1.txt'\n\narch2 = 'arch2.mp4'\n\nclientes = []\n\n#funcion que corre en paralelo para interactuar con los clientes\ndef thread(conn, i):\n ehlo = conn.recv(1024).decode('ascii')\n print('El cliente', i, 'se conecto')\n print(ehlo)\n\n log = open ('log.txt','a')\n log.write('\\n'+time.ctime(time.time()))\n \n\n conn.send('Que archivo desea?'.encode('ascii'))\n\n numero_archivo = int.from_bytes(conn.recv(1),'big')\n\n \n\n #espera a que todos los clientes esten listos para enviar el archivo\n while True:\n if esperando:\n print('Esperando el numero de clientes')\n else:\n break\n\n tiempo_inicio=time.time()\n\n\n if numero_archivo == 1:\n conn.send(len(arch1).to_bytes(4,'big'))\n conn.send(arch1.encode('ascii')) #envia nombre del archivo\n conn.send(os.path.getsize(arch1).to_bytes(8,'big')) # envia tamaño del archivo\n log.write('\\n'+str(arch1)+'-'+str(os.path.getsize(arch1)))\n f = open(arch1, 'rb')\n\n if numero_archivo == 2:\n conn.send(len(arch2).to_bytes(4,'big'))\n conn.send(arch2.encode('ascii')) #envia nombre del archivo\n conn.send(os.path.getsize(arch2).to_bytes(8,'big')) # envia tamaño del archivo\n log.write('\\n'+str(arch2)+'-'+str(os.path.getsize(arch2)))\n f = open(arch2, 'rb')\n \n conn.recv(1024)\n\n \n\n # objeto hash\n h = hashlib.sha1()\n\n #envia segmentos del archivo\n seg = f.read(buff_size)\n while seg:\n conn.send(seg)\n h.update(seg) #actualiza el hash\n seg = 
f.read(buff_size)\n\n tiempo_final = round(time.time() - tiempo_inicio,5)\n time.sleep(1)\n f.close()\n\n print('Se envio el archivo al cliente: ', i)\n\n hash_archivo = h.hexdigest()\n conn.send(hash_archivo.encode('ascii'))\n print('se envio el hash del archivo al cliente: ', i)\n print(hash_archivo)\n \n log.write('\\n cliente '+clientes[i-1]+' en tiempo '+str(tiempo_final)+' seg')\n \n print(\"El tiempo transcurrido fue: \"+ str(tiempo_final)+\" segundos\")\n \n log.close()\n\n conn.close()\n\n\ndef receive_connections():\n conexiones = 0\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((host, port))\n s.listen(5)\n print('Socket esta escuchando en el puerto: ' + str(port))\n\n while True:\n c, addr = s.accept()\n conexiones += 1\n print('Conectado con el cliente en: ', addr[0], ':', addr[1])\n clientes.append(str(conexiones)+'/'+str(addr[0]))\n start_new_thread(thread, (c,conexiones))\n\n #avisa que ya hay la cantidad de clientes necesarios para enviar el archivo\n global esperando\n esperando = conexiones < conexiones_esperadas\n \n s.close()\n\n\nif __name__ == '__main__':\n receive_connections()\n\n","repo_name":"mitooos/Lab3Redes","sub_path":"servidor/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"21717649838","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 10 01:19:07 2020\n\n@author: John\n\"\"\"\nimport os\nimport pdb\nimport votesim\nimport logging\nimport numpy as np\nfrom votesim.benchmarks import tactical\n\nnewdir = 'output'\nnewdir = os.path.join(os.getcwd(), newdir)\nos.makedirs(newdir, exist_ok=True)\nos.chdir(newdir)\n\n\ndef test_model():\n methods = ['plurality', 'irv', 'score']\n name = 'tactical-model-1'\n e = tactical.tactical_model(name, methods, seed=10)\n df = e.dataframe()\n return df\n\n\ndef test_benchmark():\n methods = ['plurality', 'irv', 'score']\n benchmark = tactical.tactical_dummy()\n df = benchmark.run(methods, cpus=1,)\n \n # test re-run capability\n e2 = benchmark.rerun(index=0, df=df)\n \n # Check to make sure outputs of re-run are the same. 
\n s1 = df.loc[0]\n s2 = e2.dataseries()\n for key in s2.keys():\n print(key, '=', s1[key])\n assert np.all(s1[key] == s2[key])\n \n return df\n\n\nif __name__ == '__main__':\n df1 = test_model()\n logging.basicConfig()\n logger = logging.getLogger('votesim.utilities.recorder')\n logger.setLevel(logging.DEBUG)\n \n # try:\n df2 = test_benchmark()\n # except Exception:\n # pdb.post_mortem()\n \n","repo_name":"johnh865/election_sim","sub_path":"votesim/benchmarks/tests/test_benchmark_tactical.py","file_name":"test_benchmark_tactical.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"42405656932","text":"from minisom import MiniSom, fast_norm\nfrom numpy import (array, unravel_index, nditer, linalg, random, subtract, max,\n power, exp, pi, zeros, ones, arange, outer, meshgrid, dot,\n logical_and, mean, std, cov, argsort, linspace, transpose,\n einsum, prod, nan, sqrt, hstack, diff, argmin, multiply,\n nanmean, nansum)\nfrom numpy.linalg import norm\nfrom warnings import warn\nfrom minisom import asymptotic_decay, _build_iteration_indexes\n\n\n\nclass MiniSom3D(MiniSom):\n def __init__(self, x, y, z, input_len, sigma=1.0, learning_rate=0.5, decay_function=asymptotic_decay,\n neighborhood_function='gaussian', topology='rectangular', activation_distance='euclidean',\n random_seed=None):\n \"\"\"Initializes a Self Organizing Maps.\n\n A rule of thumb to set the size of the grid for a dimensionality\n reduction task is that it should contain 5*sqrt(N) neurons\n where N is the number of samples in the dataset to analyze.\n\n E.g. if your dataset has 150 samples, 5*sqrt(150) = 61.23\n hence a map 8-by-8 should perform well.\n\n Parameters\n ----------\n x : int\n x dimension of the SOM.\n\n y : int\n y dimension of the SOM.\n\n input_len : int\n Number of the elements of the vectors in input.\n\n sigma : float, optional (default=1.0)\n Spread of the neighborhood function, needs to be adequate\n to the dimensions of the map.\n (at the iteration t we have sigma(t) = sigma / (1 + t/T)\n where T is #num_iteration/2)\n learning_rate : initial learning rate\n (at the iteration t we have\n learning_rate(t) = learning_rate / (1 + t/T)\n where T is #num_iteration/2)\n\n decay_function : function (default=None)\n Function that reduces learning_rate and sigma at each iteration\n the default function is:\n learning_rate / (1+t/(max_iterarations/2))\n\n A custom decay function will need to to take in input\n three parameters in the following order:\n\n 1. learning rate\n 2. current iteration\n 3. 
maximum number of iterations allowed\n\n\n Note that if a lambda function is used to define the decay\n MiniSom will not be pickable anymore.\n\n neighborhood_function : string, optional (default='gaussian')\n Function that weights the neighborhood of a position in the map.\n Possible values: 'gaussian', 'mexican_hat', 'bubble', 'triangle'\n\n topology : string, optional (default='rectangular')\n Topology of the map.\n Possible values: 'rectangular', 'hexagonal'\n\n activation_distance : string, callable optional (default='euclidean')\n Distance used to activate the map.\n Possible values: 'euclidean', 'cosine', 'manhattan', 'chebyshev'\n\n Example of callable that can be passed:\n\n def euclidean(x, w):\n return linalg.norm(subtract(x, w), axis=-1)\n\n random_seed : int, optional (default=None)\n Random seed to use.\n \"\"\"\n\n super(MiniSom3D, self).__init__(x, y, input_len, sigma, learning_rate, decay_function, neighborhood_function, topology,\n activation_distance, random_seed)\n self.shape = (x, y, z)\n\n if sigma >= x or sigma >= y or sigma >= z:\n warn('Warning: sigma is too high for the dimension of the map.')\n\n self._random_generator = random.RandomState(random_seed)\n\n self._learning_rate = learning_rate\n self._sigma = sigma\n self._input_len = input_len\n # random initialization\n\n self._weights = self._random_generator.rand(x, y, z, input_len) * 2 - 1\n\n\n self._weights /= linalg.norm(self._weights, axis=-1, keepdims=True)\n\n\n self._activation_map = zeros((x, y, z))\n\n self._neigx = arange(x)\n self._neigy = arange(y) # used to evaluate the neighborhood function\n self._neigz = arange(z) # used to evaluate the neighborhood function\n\n if topology not in ['hexagonal', 'rectangular']:\n msg = '%s not supported only hexagonal and rectangular available'\n raise ValueError(msg % topology)\n self.topology = topology\n\n self._xx, self._yy, self._zz = meshgrid(self._neigx, self._neigy, self._neigz)\n\n self._xx = self._xx.astype(float)\n self._yy = self._yy.astype(float)\n\n self._zz = self._zz.astype(float)\n\n if topology == 'hexagonal':\n self._xx[::-2] -= 0.5\n if neighborhood_function in ['triangle']:\n warn('triangle neighborhood function does not ' +\n 'take in account hexagonal topology')\n\n self._decay_function = decay_function\n\n\n neig_functions = {'gaussian': self._gaussian3D,\n 'mexican_hat': self._mexican_hat3D,\n 'bubble': self._bubble3D,\n 'triangle': self._triangle3D\n }\n\n if neighborhood_function not in neig_functions:\n msg = '%s not supported. Functions available: %s'\n raise ValueError(msg % (neighborhood_function,\n ', '.join(neig_functions.keys())))\n\n if neighborhood_function in ['triangle',\n 'bubble'] and (divmod(sigma, 1)[1] != 0\n or sigma < 1):\n warn('sigma should be an integer >=1 when triangle or bubble' +\n 'are used as neighborhood function')\n\n self.neighborhood = neig_functions[neighborhood_function]\n\n distance_functions = {'euclidean': self._euclidean_distance,\n 'cosine': self._cosine_distance,\n 'manhattan': self._manhattan_distance,\n 'chebyshev': self._chebyshev_distance}\n\n if isinstance(activation_distance, str):\n if activation_distance not in distance_functions:\n msg = '%s not supported. 
Distances available: %s'\n raise ValueError(msg % (activation_distance,\n ', '.join(distance_functions.keys())))\n\n self._activation_distance = distance_functions[activation_distance]\n elif callable(activation_distance):\n self._activation_distance = activation_distance\n\n\n def _gaussian3D(self, c, sigma):\n \"\"\"Returns a Gaussian centered in c.\"\"\"\n d = 2 * sigma * sigma\n ax = exp(-power(self._xx - self._xx.T[c], 2) / d)\n ay = exp(-power(self._yy - self._yy.T[c], 2) / d)\n az = exp(-power(self._zz - self._zz.T[c], 2) / d)\n return (ax * ay * az).T # the external product gives a matrix\n\n def _mexican_hat3D(self, c, sigma):\n \"\"\"Mexican hat centered in c.\"\"\"\n p = power(self._xx-self._xx.T[c], 2) + power(self._yy-self._yy.T[c], 2) + power(self._zz-self._zz.T[c], 2)\n d = 2*sigma*sigma\n return (exp(-p/d)*(1-2/d*p)).T\n\n def _bubble3D(self, c, sigma):\n \"\"\"Constant function centered in c with spread sigma.\n sigma should be an odd value.\n \"\"\"\n ax = logical_and(self._neigx > c[0]-sigma,\n self._neigx < c[0]+sigma)\n ay = logical_and(self._neigy > c[1]-sigma,\n self._neigy < c[1]+sigma)\n az = logical_and(self._neigz > c[2]-sigma,\n self._neigz < c[2]+sigma)\n return outer(outer(ax, ay), az)*1.\n\n def _triangle3D(self, c, sigma):\n \"\"\"Triangular function centered in c with spread sigma.\"\"\"\n triangle_x = (-abs(c[0] - self._neigx)) + sigma\n triangle_y = (-abs(c[1] - self._neigy)) + sigma\n triangle_z = (-abs(c[1] - self._neigz)) + sigma\n triangle_x[triangle_x < 0] = 0.\n triangle_y[triangle_y < 0] = 0.\n triangle_z[triangle_z < 0] = 0.\n inter = outer(triangle_x, triangle_y)\n return outer(inter, triangle_z)\n\n def pca_weights_init(self, data):\n \"\"\"Initializes the weights to span the first two principal components.\n\n This initialization doesn't depend on random processes and\n makes the training process converge faster.\n\n It is strongly reccomended to normalize the data before initializing\n the weights and use the same normalization for the training data.\n \"\"\"\n if self._input_len == 1:\n msg = 'The data needs at least 2 features for pca initialization'\n raise ValueError(msg)\n self._check_input_len(data)\n if len(self._neigx) == 1 or len(self._neigy) == 1 or len(self._neigz) == 1:\n msg = 'PCA initialization inappropriate:' + \\\n 'One of the dimensions of the map is 1.'\n warn(msg)\n pc_length, pc = linalg.eig(cov(transpose(data)))\n pc_order = argsort(-pc_length)\n for i, c1 in enumerate(linspace(-1, 1, len(self._neigx))):\n for j, c2 in enumerate(linspace(-1, 1, len(self._neigy))):\n for k, c3 in enumerate(linspace(-1, 1, len(self._neigz))):\n self._weights[i, j, k] = c1*pc[pc_order[0]] + c2*pc[pc_order[1]] + c3*pc[pc_order[2]]\n\n def update(self, x, win, t, max_iteration):\n \"\"\"Updates the weights of the neurons.\n\n Parameters\n ----------\n x : np.array\n Current pattern to learn.\n win : tuple\n Position of the winning neuron for x (array or tuple).\n t : int\n Iteration index\n max_iteration : int\n Maximum number of training itarations.\n \"\"\"\n eta = self._decay_function(self._learning_rate, t, max_iteration)\n # sigma and learning rate decrease with the same rule\n sig = self._decay_function(self._sigma, t, max_iteration)\n # improves the performances\n g = self.neighborhood(win, sig) * eta\n # w_new = eta * neighborhood_function * (x-w)\n self._weights += einsum('ijk, ijkl->ijkl', g, x - self._weights)\n\n def train(self, data, num_iteration, random_order=False, verbose=False):\n \"\"\"Trains the SOM.\n\n Parameters\n 
----------\n data : np.array or list\n Data matrix.\n\n num_iteration : int\n Maximum number of iterations (one iteration per sample).\n random_order : bool (default=False)\n If True, samples are picked in random order.\n Otherwise the samples are picked sequentially.\n\n verbose : bool (default=False)\n If True the status of the training\n will be printed at each iteration.\n \"\"\"\n self._check_iteration_number(num_iteration)\n self._check_input_len(data)\n random_generator = None\n if random_order:\n random_generator = self._random_generator\n iterations = _build_iteration_indexes(len(data), num_iteration,\n verbose, random_generator)\n for t, iteration in enumerate(iterations):\n self.update(data[iteration], self.winner(data[iteration]),\n t, num_iteration)\n if verbose:\n print('\\n quantization error:', self.quantization_error(data))\n print('\\n topographic error:', self.topographic_error(data))\n\n def get_euclidean_coordinates(self):\n \"\"\"Returns the position of the neurons on an euclidean\n plane that reflects the chosen topology in two meshgrids xx and yy.\n Neuron with map coordinates (1, 4) has coordinate (xx[1, 4], yy[1, 4])\n in the euclidean plane.\n\n Only useful if the topology chosen is not rectangular.\n \"\"\"\n return self._xx.T, self._yy.T, self._zz.T\n\n def convert_map_to_euclidean(self, xyz):\n \"\"\"Converts map coordinates into euclidean coordinates\n that reflects the chosen topology.\n\n Only useful if the topology chosen is not rectangular.\n \"\"\"\n return self._xx.T[xyz], self._yy.T[xyz], self._zz.T[xyz]\n\n def distance_map(self, scaling='sum'):\n \"\"\"Returns the distance map of the weights.\n If scaling is 'sum' (default), each cell is the normalised sum of\n the distances between a neuron and its neighbours. 
Note that this\n method uses the euclidean distance.\n\n Parameters\n ----------\n scaling : string (default='sum')\n If set to 'mean', each cell will be normalized\n by the average of the distances of the neighbours.\n If set to 'sum', the normalization is done\n by the sum of the distances.\n \"\"\"\n\n if scaling not in ['sum', 'mean']:\n raise ValueError('scaling should be either \"sum\" or \"mean\" ('\n f'\"{scaling}\" not valid)')\n\n um = nan * zeros((self._weights.shape[0],\n self._weights.shape[1],\n self._weights.shape[2],\n 26)) # 2 spots more for hexagonal topology\n\n # ii = [[0, -1, -1, -1, 0, 1, 1, 1]] * 2\n # jj = [[-1, -1, 0, 1, 1, 1, 0, -1]] * 2\n\n ii = [[0, -1, -1, -1, 0, 1, 1, 1, 0,\n 0, -1, -1, -1, 0, 1, 1, 1,\n 0, -1, -1, -1, 0, 1, 1, 1, 0]]*2\n jj = [[-1, -1, 0, 1, 1, 1, 0, -1, 0,\n -1, -1, 0, 1, 1, 1, 0, -1,\n -1, -1, 0, 1, 1, 1, 0, -1, 0]]*2 # comma after the ninth offset so jj has 26 entries like ii and kk\n kk = [[ -1, -1, -1, -1, -1, -1, -1, -1, -1,\n 0, 0, 0, 0, 0, 0, 0, 0,\n 1, 1, 1, 1, 1, 1, 1, 1, 1]] * 2\n\n if self.topology == 'hexagonal': # HAS TO BE REMADE for 3D\n ii = [[1, 1, 1, 0, -1, 0], [0, 1, 0, -1, -1, -1]]\n jj = [[1, 0, -1, -1, 0, 1], [1, 0, -1, -1, 0, 1]]\n\n for x in range(self._weights.shape[0]):\n for y in range(self._weights.shape[1]):\n for z in range(self._weights.shape[2]):\n w_2 = self._weights[x, y, z]\n e = y % 2 == 0 # only used on hexagonal topology\n for a, (i, j, k) in enumerate(zip(ii[e], jj[e], kk[e])):\n if (x+i >= 0 and x+i < self._weights.shape[0]\n and y+j >= 0 and y+j < self._weights.shape[1]\n and z+k >= 0 and z+k < self._weights.shape[2]):\n w_1 = self._weights[x+i, y+j, z+k]\n um[x, y, z, a] = fast_norm(w_2-w_1)\n\n if scaling == 'mean':\n um = nanmean(um, axis=3)\n if scaling == 'sum':\n um = nansum(um, axis=3)\n\n return um/um.max()\n\n def activation_response(self, data):\n \"\"\"\n Returns a matrix where the element i,j is the number of times\n that the neuron i,j has been the winner.\n \"\"\"\n self._check_input_len(data)\n a = zeros((self._weights.shape[0], self._weights.shape[1], self._weights.shape[2]))\n for x in data:\n a[self.winner(x)] += 1\n return a\n\n def _distance_from_weights(self, data):\n \"\"\"Returns a matrix d where d[i,j] is the euclidean distance between\n data[i] and the j-th weight.\n \"\"\"\n input_data = array(data)\n weights_flat = self._weights.reshape(-1, self._weights.shape[-1])\n input_data_sq = power(input_data, 2).sum(axis=1, keepdims=True)\n weights_flat_sq = power(weights_flat, 2).sum(axis=1, keepdims=True)\n cross_term = dot(input_data, weights_flat.T)\n return sqrt(-2 * cross_term + input_data_sq + weights_flat_sq.T)\n\n\n def quantization(self, data):\n \"\"\"Assigns a code book (weights vector of the winning neuron)\n to each sample in data.\"\"\"\n self._check_input_len(data)\n winners_coords = argmin(self._distance_from_weights(data), axis=1)\n return self._weights[unravel_index(winners_coords, self._weights.shape[:-1])]\n\n def topographic_error(self, data):\n \"\"\"Returns the topographic error computed by finding\n the best-matching and second-best-matching neuron in the map\n for each input and then evaluating the positions.\n\n A sample for which these two nodes are not adjacent counts as\n an error. 
The topographic error is given by the\n the total number of errors divided by the total of samples.\n\n If the topographic error is 0, no error occurred.\n If 1, the topology was not preserved for any of the samples.\"\"\"\n self._check_input_len(data)\n if self.topology == 'hexagonal':\n msg = 'Topographic error not implemented for hexagonal topology.'\n raise NotImplementedError(msg)\n total_neurons = prod(self._activation_map.shape)\n if total_neurons == 1:\n warn('The topographic error is not defined for a 1-by-1 map.')\n return nan\n\n t = 1.42\n # b2mu: best 2 matching units\n b2mu_inds = argsort(self._distance_from_weights(data), axis=1)[:, :2]\n b2my_xyz = unravel_index(b2mu_inds, self._weights.shape[:-1])\n b2mu_x, b2mu_y, b2mu_z = b2my_xyz[0], b2my_xyz[1], b2my_xyz[2]\n dxdydz = hstack([diff(b2mu_x), diff(b2mu_y), diff(b2mu_z)])\n distance = norm(dxdydz, axis=1) # matrix or vector norm, might not work for nd array\n return (distance > t).mean()","repo_name":"moisaoana/symbolic_analysis","sub_path":"Som_Code/Minisom3D.py","file_name":"Minisom3D.py","file_ext":"py","file_size_in_byte":17491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28803276503","text":"\"\"\"\nMisc Utility functions\n\"\"\"\nimport os\nimport logging\nimport datetime\nimport torch\nfrom collections import OrderedDict\n\n\ndef get_logger(logdir):\n logger = logging.getLogger(\"ptsemseg\")\n ts = str(datetime.datetime.now()).split(\".\")[0].replace(\" \", \"_\")\n ts = ts.replace(\":\", \"_\").replace(\"-\", \"_\")\n file_path = os.path.join(logdir, \"run_{}.log\".format(ts))\n hdlr = logging.FileHandler(file_path)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\")\n hdlr.setFormatter(formatter)\n logger.addHandler(hdlr)\n logger.setLevel(logging.INFO)\n return logger\n\n\ndef print_parameter_number(model, model_name):\n total_params = sum(p.numel() for p in model.parameters())\n trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n log_str = 'Total number of trainable parameters / all parameters in {}: {} / {}'\\\n .format(model_name, trainable_params, total_params)\n print(log_str)\n\n\ndef load_value_file(file_path):\n with open(file_path, 'r') as input_file:\n value = float(input_file.read().rstrip('\\n\\r'))\n\n return value\n\n\ndef load_model(model, snapshot):\n new_state_dict = OrderedDict()\n for k, v in snapshot.items():\n head = k[:7]\n name = k[7:] if head == 'module.' 
else k\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict, strict=False)\n\n\ndef _strip_DataParallel(net):\n if isinstance(net, torch.nn.DataParallel):\n return _strip_DataParallel(net.module)\n return net\n\n\ndef remove_adjust_features(model):\n _strip_DataParallel(model).adjust_features = None\n\n\ndef get_last_features_size(model):\n return _strip_DataParallel(model).fc.in_features\n\n\ndef get_model_state(model):\n return _strip_DataParallel(model).state_dict()\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n if self.count > 0:\n self.avg = self.sum / self.count","repo_name":"deepmd/Distil-2D-3D","sub_path":"libs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"2517481508","text":"import threading\nimport requests\nimport sys\nimport os\nimport time\n\nipturl = str()\nWord = str()\nWordL = list()\nerrorC = list()\nSCD = dict()\n\nthreads = []\n\ndef urlGenerator(inputS, word):\n return \"https://\" + inputS + \"/\" + word\n\ndef Broute_Force(url):\n global SCD\n try:\n response = requests.get(url)\n SCD[url] = int(response.status_code)\n except:\n print(\"URL error for : \" + url)\n SCD[url] = 404\n\ndef inputP():\n try:\n global ipturl\n global WordF\n global errorC\n global WordL\n\n ipturl = sys.argv[1]\n WordF = sys.argv[2]\n\n for x in range(3, len(sys.argv)):\n errorC.append(int(sys.argv[x]))\n\n if not(os.path.isfile(WordF)):\n raise Exception(\"The input of word file does not exist or accessing from wrong location(path)\")\n\n WordL = open(WordF, 'r').readlines()\n\n for x in range(len(WordL)):\n WordL[x] = WordL[x].strip()\n except Exception as e:\n print(e)\n sys.exit()\n\n\ndef actionHandler():\n global WordL\n global ipturl\n urls = []\n global SCD\n global threads\n\n for word in WordL:\n urls.append(urlGenerator(ipturl, word))\n\n for url in urls:\n threads.append(threading.Thread(\n target=Broute_Force, args=(url,)))\n\n for x in range(len(threads)):\n threads[x].start()\n\n\ndef outputGeneration():\n global SCD\n global errorC\n global threads\n\n for x in range(len(threads)):\n threads[x].join()\n\n file = open('ans.txt', 'w')\n\n print(\"Printing and writing require one in ans.txt file !\")\n\n for key, value in SCD.items():\n print(key + \" [Status code \" + str(value) + \"]\" +\"\\n\")\n if value in errorC:\n to_write = key + \" [Status code \" + str(value) + \"]\" + \"\\n\"\n file.write(to_write)\n\n file.close()\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n inputP()\n actionHandler()\n outputGeneration()\n print(\"%s sec\" % (time.time() - start_time))\n","repo_name":"pawanptu/CloudSEK_Backend_Assignment_Solution","sub_path":"CloudSEK.py","file_name":"CloudSEK.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"13412637943","text":"import unittest\nfrom where_is_the_nearest_bikeshare_bike.where_is_the_nearest_bikeshare_bike import angle_between_gps\n\n\nclass AngleBetweenGpsTests(unittest.TestCase):\n\n def test_north_angle(self):\n # Use same GPS except increase latitude\n\n from_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 
38.58638889\n }\n\n to_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 48.58638889\n }\n\n expected = 0\n\n actual = angle_between_gps(from_gps, to_gps)\n\n self.assertEqual(expected, actual, 'test_north_angle')\n\n def test_east_angle(self):\n # Use same GPS except increase longitude\n\n from_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 48.58638889\n }\n\n to_gps = {\n \"longitude\": -111.49305556,\n \"latitude\": 48.58638889\n }\n\n expected = 90\n\n actual = angle_between_gps(from_gps, to_gps)\n\n self.assertEqual(expected, actual, 'test_east_angle')\n\n def test_south_angle(self):\n # Use same GPS except decrease latitude\n\n from_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 48.58638889\n }\n\n to_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 38.58638889\n }\n\n expected = 180\n\n actual = angle_between_gps(from_gps, to_gps)\n\n self.assertEqual(expected, actual, 'test_south_angle')\n\n def test_west_angle(self):\n # Use same GPS except decrease longitude\n\n from_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 28.58638889\n }\n\n to_gps = {\n \"longitude\": -151.49305556,\n \"latitude\": 28.58638889\n }\n\n expected = 270\n\n actual = angle_between_gps(from_gps, to_gps)\n\n self.assertEqual(expected, actual, 'test_west_angle')\n\n def test_angle_between_citypark_and_nearest(self):\n\n city_park_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 38.58638889\n }\n\n nearest_hub_gps = {\n \"longitude\": -121.4920789003372,\n \"latitude\": 38.586078791838744\n }\n\n # https://www.calculator.net/slope-calculator.html?type=1&x11=-121.49305556&y11=38.58638889&x12=-121.4920789003372&y12=38.586078791838744&x=36&y=14\n # Compass uses north (y axis) as 0.\n expected = 108\n\n actual = angle_between_gps(city_park_gps, nearest_hub_gps)\n\n self.assertEqual(expected, actual, 'test_angle_between_citypark_and_nearest hub')\n\n\n def test_north_east(self):\n\n city_park_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 38.58638889\n }\n\n chipotle_in_arden = {\n \"longitude\": -121.417894,\n \"latitude\": 38.59719\n }\n\n # https://www.wolframalpha.com/input/?i=graph+(-121.49305556,+38.58638889)+(-121.417894,+38.59719)\n\n expected = 82\n\n actual = angle_between_gps(city_park_gps, chipotle_in_arden)\n\n self.assertEqual(expected, actual, 'test_north_east city park to arden')\n\n def test_north_west(self):\n\n city_park_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 38.58638889\n }\n\n jimboom_street_bridge = {\n \"longitude\": -121.5070186,\n \"latitude\": 38.5997365\n }\n\n # https://www.wolframalpha.com/input/?i=graph+(-121.49305556,+38.58638889)+(-121.5070186,+38.5997365)\n\n # https://www.calculator.net/slope-calculator.html?type=1&x11=-121.49305556&y11=38.58638889&x12=-121.5070186&y12=38.5997365&x=58&y=21\n # 136\n # Compass uses north (y axis) as 0.\n expected = 314\n\n actual = angle_between_gps(city_park_gps, jimboom_street_bridge)\n\n self.assertEqual(expected, actual, 'test_north_west city park to jimboom street bridge')\n\n def test_south_west(self):\n\n city_park_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 38.58638889\n }\n\n amtrak_station = {\n \"longitude\": -121.5016916,\n \"latitude\": 38.5840008,\n }\n\n # https://www.wolframalpha.com/input/?i=graph+(-121.49305556,+38.58638889)+(-121.5016916,+38.5840008)\n\n # https://www.calculator.net/slope-calculator.html?type=1&x11=-121.49305556&y11=38.58638889&x12=-121.5070186&y12=38.5997365&x=58&y=21\n # 195\n # Compass uses north (y axis) as 0.\n expected = 
255\n\n actual = angle_between_gps(city_park_gps, amtrak_station)\n\n self.assertEqual(expected, actual, 'test_south_west city park to amtrak')\n\n def test_south_east(self):\n\n city_park_gps = {\n \"longitude\": -121.49305556,\n \"latitude\": 38.58638889\n }\n\n sacramento_bicycle_kitchen = {\n \"longitude\": -121.4821897,\n \"latitude\": 38.5783092\n }\n\n # https://www.wolframalpha.com/input/?i=graph+(-121.49305556,+38.58638889)+(-121.4821897,+38.5783092)\n\n expected = 127\n\n actual = angle_between_gps(city_park_gps, sacramento_bicycle_kitchen)\n\n self.assertEqual(expected, actual, 'test_south_east citypark to bike kitchen')\n\n","repo_name":"BrianHenryIE/Bikeshare-Siri-Shortcuts","sub_path":"test/test_angle_between_gps.py","file_name":"test_angle_between_gps.py","file_ext":"py","file_size_in_byte":5216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"28108699782","text":"import requests\n\ndef get_url(url: str):\n \"\"\"\n Function that will call a provide GET API endpoint url and return its status code and either its content or error message as a string\n\n Parameters\n ----------\n url : str\n URL of the GET API endpoint to be called\n\n Returns\n -------\n int\n API call response status code\n str\n Text from API call response\n \"\"\"\n\n try:\n\n get_response = requests.get(url=url)\n\n status_code = get_response.status_code\n\n if status_code == 200:\n response = get_response.json()\n else:\n response = f\"Error: Status Code {status_code}\"\n\n return status_code, response\n \n except Exception as e:\n return 0, str(e)\n\n\n \n\n\n","repo_name":"neeschal00/frankfurter-currency-converter","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"38676754721","text":"import pandas\nimport random\n\nstu_id = [i+j+k for k in [18000000000, 19000000000, 20000000000] for j in [307110000, 307130000, 300180000] for i in range(500)]\nRoads = [\"Avenue\", \"Street\", \"Road\", \"Lane\"]\nCities = [\"Tokyo\", \"Delhi\", \"Manila\", \"Sao Paulo\", \"Guangzhou\", \"Shanghai\", \"Beijing\", \"Los Angeles\", \"Bangkok\",\n \"Seoul\", \"Buenos Aires\", \"Paris\", \"London\", \"Madrid\", \"Hong Kong\"]\nnames = pandas.read_csv(r\"..\\playground\\names.csv\")\nnames = list(names['names'])\nuser = []\nfor stu in stu_id:\n surname = random.choice(names)\n lastname = random.choice(names)\n user.append(\n [surname + ' ' + lastname,\n random.choices([\"Male\", \"Female\", \"None\"], [10, 10, 1], k=1)[0],\n random.choice(['189', '186', '137', '191', '158']) + str(random.randint(10000000, 100000000)),\n str(stu) + '@fudan.edu.cn',\n str(random.randint(1, 999)) + ' ' + random.choice(names)[:6] + ' ' + random.choice(Roads) + ', ' + random.choice(\n Cities),\n random.choice([0, 1])])\nuser = pandas.DataFrame(user, columns=['name', 'sex', 'phone', 'email', 'address', 'vip'])\nprint(user)\nuser.to_csv(r'..\\data\\users.csv')\n","repo_name":"super-dainiu/MyBookDB","sub_path":"playground/random_user.py","file_name":"random_user.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"31603101243","text":"from torch import nn\nfrom models.backbone import *\n\n\n#Create the twin network with the backbone model and the projection head\nclass MinSpeak(nn.Module):\n\n\tdef __init__(self, args):\n\t\tself.args = 
args\n\t\tsuper().__init__()  # nn.Module must be initialised before sub-modules are assigned\n\t\tself.backbone = ResidualBLSTM(Resblock, [2])\n\t\tself.end_dim = self.backbone.flatten.end_dim\n\t\tself.dim_out = 567\n\t\t# projection head\n\t\tself.proj_head = nn.Sequential(\n\t\t\tnn.Linear(self.end_dim, self.end_dim, bias=False),\n\t\t\tnn.BatchNorm1d(self.end_dim),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Linear(self.end_dim, self.end_dim, bias=False),\n\t\t\tnn.BatchNorm1d(self.end_dim),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Linear(self.end_dim, self.dim_out, bias=False),  # maps end_dim down to dim_out so the final BatchNorm1d sees the width it expects\n\t\t\tnn.BatchNorm1d(self.dim_out, affine=False)\n\t\t)\n\t\tself.predictor = nn.Sequential(\n\t\t\tnn.Linear(self.dim_out, self.end_dim, bias=False),\n\t\t\tnn.BatchNorm1d(self.end_dim),\n\t\t\tnn.ReLU(inplace=True), # hidden layer\n\t\t\tnn.Linear(self.end_dim, self.dim_out)\n\t\t)\n\n\tdef forward(self, x1, x2):\n\t\t# Inputs are two positive samples x1 and x2 drawn from the same speaker\n\n\t\to1 = self.proj_head(self.backbone(x1))\n\t\to2 = self.proj_head(self.backbone(x2))\n\n\t\tp1 = self.predictor(o1)\n\t\tp2 = self.predictor(o2)\n\n\t\treturn p1, p2, o1.detach(), o2.detach()","repo_name":"aitor-alvarez/self-supervised-speaker","sub_path":"models/MinSpeak.py","file_name":"MinSpeak.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"44524674951","text":"#Tan Hyong Hsing\n#20DDT21F1002\n\ncar_price = 90000\nminimum_dp = 0.10 * car_price\nfixed_interest_rate = 0.027\n\n\ncustomer_dp = int(input('Please enter your downpayment: '))\n\nif customer_dp < minimum_dp:\n    print('You are not eligible for the bank loan.')\nelse:\n    loan_amount = car_price - customer_dp\n    loan = int(input('How long do you want the loan for, in years (1 to 9 only): '))\n    total_interest = fixed_interest_rate * loan_amount * loan\n    loan_period_in_month = loan * 12\n    monthly_installment = (loan_amount + total_interest) / loan_period_in_month\n    two_decimal = round(monthly_installment, 2)\n    print('You need to pay RM ' + str(two_decimal) + ' monthly.')","repo_name":"TANHYONGHSING/legend","sub_path":"PYTHONPROJECT/lab_exercise1(3).py","file_name":"lab_exercise1(3).py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"5411491111","text":"\n\ndef screen():\n\tprint(\"\\n\\t\")\n\tprint(\" =================================================\")\n\tprint(\"\\n\\t\\tFUNCTION\\n\")\n\tprint(\" *** CAPITALIZE all the COMMANDS\")\n\tprint(\" **** Enter all DATA in LOWERCASE\")\n\tprint(\" **** Do not use space bar\")\n\tprint(\" -------------------------------------------------\")\n\tprint(\" + (data)\t\tinsert\")\n\tprint(\" - (data)\t\tdelete\")\n\tprint(\" = (data)\t\treplace data\")\n\tprint(\" E\t\t\tempty the array\")\n\tprint(\" @\t\t\tprint present position\")\n\tprint(\" A\t\t\tlength of array\") #create\n\tprint(\" L\t\t\tprint array\")\n\tprint(\" R\t\t\treverse array\") #create\n\tprint(\" S\t\t\tsort\") #create\n\tprint(\" V\t\t\tsort (reverse)\") #create\n\tprint(\" <\t\t\tgo to first index\")\n\tprint(\" >\t\t\tgo to last index\")\n\tprint(\" N\t\t\tgo to next index\")\n\tprint(\" P\t\t\tgo to previous index\")\n\tprint(\" M (index)\t\tmove current item to (index)\")\n\tprint(\" MN\t\t\tmove forward one\")\n\tprint(\" MP\t\t\tmove backward one\")\n\tprint(\" Mn\t\t\tmove to end of list\")\n\tprint(\"\\n Q\t\t\tquit\")\n\tprint(\" =================================================\")\n\n
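# Usage sketch (hedged reading of the command loop below): \"+a+b+c<\" inserts\n# 'a', 'b', 'c' and parks the cursor at index 0; \"M2\" shifts the current element\n# to index 2; \"-\" removes the element under the cursor.\n\n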
def printarr(array): # L\n\tif length == 0:\n\t\tprint(\" This array is empty\")\n\telse:\n\t\tfor i in range(length):\n\t\t\tif i == position or (position == -1 and length != 0):\n\t\t\t\tprint('(' + array[i] + ')', end=' ')\n\t\t\telse:\n\t\t\t\tprint(array[i], end=' ')\n\t\tprint()\n\ndef insert(array, data): # +\n\tglobal position\n\tposition += 1\n\tarray.insert(position, data)\n\ndef delete(array, index): # -\n\tglobal position\n\tdel array[index]  # remove the element the cursor points at (the original deleted from the global list and ignored both parameters)\n\tif position >= len(array):\n\t\tposition = 0\n\ndef move(array, index):\n\tglobal position\n\tif position > index:\n\t\tfor i in range(position, index, -1):\n\t\t\ttemp = array[i - 1]\n\t\t\tarray[i - 1] = array[i]\n\t\t\tarray[i] = temp\n\telif position < index:\n\t\tfor i in range(position, index, +1):\n\t\t\ttemp = array[i + 1]\n\t\t\tarray[i + 1] = array[i]\n\t\t\tarray[i] = temp\n\n\n\n# main loop\nscreen()\nposition = -1\nlength = 0\nmy_array = []\n\nwhile True:\n\tlength = len(my_array)\n\tprintarr(my_array)\n\tcmd = input(\" >>> \")  # renamed from 'type', which shadowed the builtin\n\tif not cmd:\n\t\tcontinue\n\tif cmd[0] == 'Q':\n\t\tprint(\"- The end -\")\n\t\tbreak\n\ti = 0\n\twhile i < len(cmd):  # a while loop so commands that take an argument can really skip it; 'i += 1' had no effect inside the original for loop\n\t\tif cmd[i] == 'E':\n\t\t\tmy_array.clear()\n\t\t\tposition = -1\n\t\t\tbreak\n\t\tif cmd[i] == 'S':\n\t\t\tmy_array.sort()\n\t\t\tposition = length - 1\n\t\t\tbreak\n\t\tif cmd[i] == 'V':\n\t\t\tmy_array.sort(reverse=True)\n\t\t\tposition = length - 1\n\t\t\tbreak\n\t\tif cmd[i] == 'R':\n\t\t\tmy_array.reverse()\n\t\t\tposition = length - 1\n\t\t\tbreak\n\t\tif cmd[i] == '=':\n\t\t\tmy_array[position] = cmd[i + 1]\n\t\t\ti += 1\n\t\tif cmd[i] == 'L':\n\t\t\tif length == 0:\n\t\t\t\tprint(\" This array is empty\")\n\t\t\telse:\n\t\t\t\tprint(\" my_array : \", my_array)\n\t\t\tbreak\n\t\tif cmd[i] == '@':\n\t\t\tprint(\" my_array[\", position, \"] = \", my_array[position])\n\t\tif cmd[i] == 'A':\n\t\t\tprint(\" length :\", length)\n\t\t\tbreak\n\t\tif cmd[i] == '+':\n\t\t\tif position > 30:\n\t\t\t\tbreak\n\t\t\tinsert(my_array, cmd[i + 1])\n\t\t\ti += 1\n\t\tif cmd[i] == '-':\n\t\t\tif length == 0:\n\t\t\t\tbreak\n\t\t\tdelete(my_array, position)\n\t\tif cmd[i] == '<':\n\t\t\tposition = 0\n\t\tif cmd[i] == '>':\n\t\t\tposition = length - 1\n\t\tif cmd[i] == 'N':\n\t\t\tif position >= 30:\n\t\t\t\tbreak\n\t\t\tposition += 1\n\t\tif cmd[i] == 'P':\n\t\t\tif position < 0:\n\t\t\t\tbreak\n\t\t\tposition -= 1\n\t\tif cmd[i] == 'M':\n\t\t\tif cmd[i + 1] == 'N':\n\t\t\t\tmove(my_array, position + 1)\n\t\t\telif cmd[i + 1] == 'P':\n\t\t\t\tmove(my_array, position - 1)\n\t\t\telif cmd[i + 1] == 'n':\n\t\t\t\tmove(my_array, length - 1)\n\t\t\t\tposition = length - 1\n\t\t\telse:\n\t\t\t\tmove(my_array, int(cmd[i + 1]))\n\t\t\t\tposition = int(cmd[i + 1])\n\t\t\ti += 1\n\t\ti += 1\n","repo_name":"nimod7890/Data_Structure","sub_path":"Array/array_adt.py","file_name":"array_adt.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
{"seq_id":"12635096128","text":"\nimport openpyxl\nimport psycopg2\nimport pandas as pd\nimport os\nimport os.path\nfrom datetime import date, timedelta\nimport datetime\nfrom twilio.rest import Client\nimport sys\n\n\nid = 'id#'\ntoken = 'token#'\ntwilio = 'twilio_number'\ntwilioClient = Client(id, token)\njy = 'my_number'\n\n\n# sql connection\nhostname = 'hostname_of_database'\nusername = 'username'\npassword = 'password'\ndatabase = 'database'\nport = 1234\n\n\n# open sql connection and create cursor\nmyConnection = psycopg2.connect(host=hostname, user=username, password=password, dbname=database, port=port)\ncur = myConnection.cursor()\n\n\ntoday = date.today()\nnow = datetime.datetime.now()\n\n\n# OneDrive Folder Location\nloc = \"folder_location\"\n\n\n
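# (hedged sketch) a compact pandas alternative to the manual comma-splitting\n# loop further below, assuming pandas >= 0.25 for Series.explode:\n#   ids = df['customer_id'].astype(str).str.split(',').explode().str.strip().drop_duplicates()\n\n# running 'cohort_builder' query and 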
importing it into pandas, turning the account number into an integer for the join\n\nquery = \"select * from tablename where condition\"\n# Importing the resulting SQL query into Pandas using read_sql\ninfo = pd.read_sql(query, con=myConnection)\ninfo['customer_id'] = info['customer_id'].astype(int)\n\n# OneDrive Folder\n# Get the list of files in the folder that end with .csv and do not have '-info' in the name, since that suffix means the file has already been worked on\nfilelist = [x.name for x in list(os.scandir(loc)) if x.is_file() and '.csv' in x.name and '-info' not in x.name]\n\n# counters for the error summary sent at the bottom of the script\nkerror = 0\nkperror = 0\nkverror = 0\n\n# Using PANDAS to clean up and organize data\nfor file in filelist:\n    try:\n        name = file.split('.')[0]\n\n        # reading the csv\n        df = pd.read_csv(str(loc) + str(file))\n\n        # drop all columns after the first one and rename it 'customer_id'\n        df.drop(df.columns[1:], axis=1, inplace=True)\n        df.columns = ['customer_id']\n\n        # for each row, split the account numbers on commas, strip the blank spaces and collect them in a new list\n        newlists = []\n\n        df1 = df.iloc[:, :1]\n        df1.drop_duplicates(subset='customer_id', inplace=True)\n        df2 = df1['customer_id'].astype(str).tolist()\n\n        for row in df2:\n            if ',' in row:\n                for each in row.split(','):\n                    newlists.append(each.strip())\n            else:\n                newlists.append(row)\n\n        # stack the list into a column, pick unique account numbers, turn it into a dataframe, drop the newly created index\n        new_df = pd.DataFrame(pd.DataFrame(newlists).stack().unique()).reset_index(drop=True)\n\n        # rename the column and turn the account numbers into integers to join to info,\n        # creating a dataframe with unique accounts\n\n        new_df.columns = ['customer_id']\n\n        new_df['customer_id'] = new_df['customer_id'].astype(str)\n\n        new_df.replace(r'nan', '0', regex=True, inplace=True)\n\n        new_df['customer_id'] = new_df['customer_id'].apply(int)\n\n        new_dfs = new_df[['customer_id']].copy()\n\n        new_dfs.drop_duplicates(subset='customer_id', inplace=True)\n\n        # merge (left-join) the unique account_number dataframe with the info dataframe from the cohort builder, then sort\n        df3 = pd.merge(new_dfs, info, left_on='customer_id', right_on='customer_id', how='left')\n        df4 = df3[['customer_id', 'email', 'first_name', 'last_name', 'address1', 'address2', 'city', 'state', 'zip', 'phone']]\n\n        # getting dataframes with null and permissions (email permission & unsubscribe)\n        dfnull = df4[df4['email'].isnull()]\n        dfnoperm = df4[(df4['email'] == 'EMAIL - UNSUBSCRIBED') | (df4['email'] == 'EMAIL - NO PERMISSION')]\n        dfreg = df4[(df4['email'].notnull()) & (df4['email'] != 'EMAIL - UNSUBSCRIBED') & (df4['email'] != 'EMAIL - NO PERMISSION')]\n\n        # rank the regular-email dataframe by street number first, then by account\n        dfreg['addressnumber'] = dfreg['address1'].str.extract(r'(\\d+)')\n        dfreg['addressnumber'].fillna(99999, inplace=True)\n        dfreg['addressnumber'] = dfreg['addressnumber'].astype(int)\n\n        dfreg['rank'] = dfreg.groupby('email')['addressnumber'].rank(ascending=True, method='dense')\n        dfreg['rank1'] = dfreg.groupby('email')['customer_id'].rank(ascending=False, method='dense')  # df4 has no 'dp_customer_id' column, which made every file raise a KeyError\n\n        dfregfiltered = dfreg[(dfreg['rank1'] == 1) & (dfreg['rank'] == 1)]\n        dfregfiltered.drop(['rank', 'rank1', 'addressnumber'], axis=1, inplace=True)\n        dfregfiltered = dfregfiltered.sort_values(by='email')\n\n        # combining all the separate dataframes into one, replacing nulls and blank spaces with 'N/A'\n        dfall = 
pd.concat([dfregfiltered, dfnoperm, dfnull])\n dfall.fillna('N / A', inplace=True)\n dfall.replace(r'^\\s*$', 'N / A', regex=True, inplace=True)\n\n # sending it to excel and removing the old file\n dfall.to_excel(loc+name+'-info.xlsx', index=False)\n os.remove(loc+file)\n\n except PermissionError:\n # Alert when the file can't be removed because someone has the file opened\n kperror += 1\n\n except ValueError:\n # Alert when the file has non-numeric value for account_numbers\n kverror += 1\n\n except:\n # Alert when info append is not working and there are csv files in the folder by sending a text message\n kerror += 1\n\n\n# ERROR TEXT MESSAGES\n# Split up the errors into 2. First is when File is Opened and unable to be removed: Permission Error (permerror).\n# Second is some other error in the system. Need to investigate further.\n\n# Case 1: When Cohort Builder is working but unable to remove because the file is opened by someone or account number column has non-numeric entries\nif kerror == 0 and (kverror > 0 or kperror > 0):\n contents = (\"The Cohort is working: \" + str(kperror) + \" Opened / Unable to Remove csv file(s) and \" +\n str(kverror) + \" file(s) containing non-numeric account_id number. \" + now.strftime(\"%Y-%m-%d %H:%M\") + \".\")\n\n message = twilioClient.messages.create(body=contents, from_=twilio, to=jy)\n\n# Case 2 : When Cohort Builder is not working for some reason & not due to non-numeric account number or file being opened. Need to investigate\nelif kerror > 0:\n contents = \"The Cohort is not working right now. There are \" + str(file) + \" csv file(s) in the folder. Need to investigate. \" + \\\n now.strftime(\"%Y-%m-%d %H:%M\") + \".\"\n message = twilioClient.messages.create(body=contents, from_=twilio, to=jy)\n\n# commit sql changes and close cursor,connection\ncur.close()\nmyConnection.commit()\nmyConnection.close()\n","repo_name":"scratchpapers/data","sub_path":"Cohort_Builder/Cohort_Builder.py","file_name":"Cohort_Builder.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15195529589","text":"import unittest\n\nfrom gridworld import GridworldMdp, Direction\nfrom mdp_interface import Mdp\nfrom utils import set_seeds\nimport random\n\nclass TestDirection(unittest.TestCase):\n def test_direction_number_conversion(self):\n all_directions = Direction.ALL_DIRECTIONS\n all_numbers = []\n\n for direction in Direction.ALL_DIRECTIONS:\n number = Direction.get_number_from_direction(direction)\n direction_again = Direction.get_direction_from_number(number)\n self.assertEqual(direction, direction_again)\n all_numbers.append(number)\n\n # Check that all directions are distinct\n num_directions = len(all_directions)\n self.assertEqual(len(set(all_directions)), num_directions)\n # Check that the numbers are 0, 1, ... 
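\n        # (a stricter hypothetical variant: sorted(all_numbers) == list(range(num_directions)))\n        # i.e. every number from 0 through 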
num_directions - 1\n self.assertEqual(set(all_numbers), set(range(num_directions)))\n\nclass TestGridworld(unittest.TestCase):\n def setUp(self):\n self.grid1 = [['X', 'X', 'X', 'X', 'X'],\n ['X', ' ', ' ', 'A', 'X'],\n ['X', '3', 'X', ' ', 'X'],\n ['X', ' ', ' ', '1', 'X'],\n ['X', 'X', 'X', 'X', 'X']]\n self.grid2 = ['XXXXXXXXX',\n 'X9X X AX',\n 'X X X X',\n 'X X',\n 'XXXXXXXXX']\n self.grid3 = [['X', 'X', 'X', 'X', 'X'],\n ['X', 3.5, 'X', -10, 'X'],\n ['X', ' ', '1', ' ', 'X'],\n ['X', ' ', ' ', 'A', 'X'],\n ['X', 'X', 'X', 'X', 'X']]\n\n self.mdp1 = GridworldMdp(self.grid1, living_reward=0)\n self.mdp2 = GridworldMdp(self.grid2, noise=0.2)\n self.mdp3 = GridworldMdp(self.grid3)\n\n def test_str(self):\n expected = '\\n'.join([''.join(row) for row in self.grid1])\n self.assertEqual(str(self.mdp1), expected)\n expected = '\\n'.join(self.grid2)\n self.assertEqual(str(self.mdp2), expected)\n expected = '\\n'.join(['XXXXX',\n 'XRXNX',\n 'X 1 X',\n 'X AX',\n 'XXXXX'])\n self.assertEqual(str(self.mdp3), expected)\n\n def test_constructor_invalid_inputs(self):\n # Height and width must be at least 2.\n with self.assertRaises(AssertionError):\n mdp = GridworldMdp(['X', 'X', 'X'])\n with self.assertRaises(AssertionError):\n mdp = GridworldMdp([['X', 'X', 'X']])\n\n with self.assertRaises(AssertionError):\n # Borders must be present.\n mdp = GridworldMdp([' A',\n '3X ',\n ' 1'])\n\n with self.assertRaises(AssertionError):\n # There can't be more than one agent.\n mdp = GridworldMdp(['XXXXX',\n 'XA 3X',\n 'X3 AX',\n 'XXXXX'])\n\n with self.assertRaises(AssertionError):\n # There must be one agent.\n mdp = GridworldMdp(['XXXXX',\n 'X 3X',\n 'X3 X',\n 'XXXXX'])\n\n with self.assertRaises(AssertionError):\n # There must be at least one reward.\n mdp = GridworldMdp(['XXXXX',\n 'XAX X',\n 'X X',\n 'XXXXX'])\n\n with self.assertRaises(AssertionError):\n # B is not a valid element.\n mdp = GridworldMdp(['XXXXX',\n 'XB X',\n 'X 3X',\n 'XXXXX'])\n\n def test_start_state(self):\n self.assertEqual(self.mdp1.get_start_state(), (3, 1))\n self.assertEqual(self.mdp2.get_start_state(), (7, 1))\n self.assertEqual(self.mdp3.get_start_state(), (3, 3))\n\n def test_reward_parsing(self):\n self.assertEqual(self.mdp1.rewards, {\n (1, 2): 3,\n (3, 3): 1\n })\n self.assertEqual(self.mdp2.rewards, {\n (1, 1): 9\n })\n self.assertEqual(self.mdp3.rewards, {\n (1, 1): 3.5,\n (2, 2): 1,\n (3, 1): -10\n })\n\n def test_actions(self):\n a = [Direction.NORTH, Direction.SOUTH, Direction.EAST, Direction.WEST, Direction.STAY]\n all_acts = set(a)\n self.assertEqual(set(Direction.ALL_DIRECTIONS), all_acts)\n\n with self.assertRaises(ValueError):\n self.mdp1.get_actions((0, 0))\n\n self.assertEqual(set(self.mdp1.get_actions((1, 1))), all_acts)\n self.assertEqual(set(self.mdp1.get_actions((1, 2))), all_acts)\n self.assertEqual(set(self.mdp2.get_actions((6, 2))), all_acts)\n self.assertEqual(set(self.mdp2.get_actions((3, 1))), all_acts)\n self.assertEqual(set(self.mdp3.get_actions((2, 2))), all_acts)\n\n def test_rewards(self):\n grid1_reward_table = {\n (3, 3): 1,\n (1, 2): 3\n }\n grid2_reward_table = {\n (1, 1): 9\n }\n grid3_reward_table = {\n (1, 1): 3.5,\n (2, 2): 1,\n (3, 1): -10\n }\n self.check_all_rewards(self.mdp1, grid1_reward_table, 0)\n self.check_all_rewards(self.mdp2, grid2_reward_table, -0.01)\n self.check_all_rewards(self.mdp3, grid3_reward_table, -0.01)\n\n def check_all_rewards(self, mdp, reward_lookup_table, living_reward):\n for state in mdp.get_states():\n for action in mdp.get_actions(state):\n expected = 0\n if 
state in reward_lookup_table:\n expected += reward_lookup_table[state]\n if action != Direction.STAY:\n expected += living_reward\n self.assertEqual(mdp.get_reward(state, action), expected)\n\n def test_transitions(self):\n n, s = Direction.NORTH, Direction.SOUTH\n e, w = Direction.EAST, Direction.WEST\n stay_action = Direction.STAY\n\n # Grid 1: No noise\n with self.assertRaises(ValueError):\n self.mdp1.get_transition_states_and_probs((0, 0), stay_action)\n\n result = self.mdp1.get_transition_states_and_probs((1, 3), n)\n self.assertEqual(set(result), set([((1, 2), 1)]))\n result = self.mdp1.get_transition_states_and_probs((1, 2), stay_action)\n self.assertEqual(set(result), set([((1, 2), 1)]))\n result = self.mdp1.get_transition_states_and_probs((1, 1), n)\n self.assertEqual(set(result), set([((1, 1), 1)]))\n\n # Grid 2: Noise of 0.2\n result = set(self.mdp2.get_transition_states_and_probs((1, 2), n))\n self.assertEqual(result, set([\n ((1, 1), 0.8),\n ((1, 2), 0.2)\n ]))\n result = set(self.mdp2.get_transition_states_and_probs((6, 2), w))\n self.assertEqual(result, set([\n ((5, 2), 0.8),\n ((6, 1), 0.1),\n ((6, 3), 0.1)\n ]))\n result = set(self.mdp2.get_transition_states_and_probs((7, 3), e))\n self.assertEqual(result, set([\n ((7, 3), 0.9),\n ((7, 2), 0.1)\n ]))\n result = set(self.mdp2.get_transition_states_and_probs((5, 1), s))\n self.assertEqual(result, set([\n ((5, 2), 0.8),\n ((5, 1), 0.1),\n ((6, 1), 0.1)\n ]))\n result = self.mdp2.get_transition_states_and_probs((3, 1), n)\n self.assertEqual(set(result), set([((3, 1), 1)]))\n result = self.mdp2.get_transition_states_and_probs((1, 1), stay_action)\n self.assertEqual(set(result), set([((1, 1), 1)]))\n\n def test_states_reachable(self):\n def check_grid(grid):\n self.assertEqual(set(grid.get_states()), self.dfs(grid))\n\n for grid in [self.mdp1, self.mdp2, self.mdp3]:\n check_grid(grid)\n\n def dfs(self, grid):\n visited = set()\n def helper(state):\n if state in visited:\n return\n visited.add(state)\n for action in grid.get_actions(state):\n for next_state, _ in grid.get_transition_states_and_probs(state, action):\n helper(next_state)\n\n helper(grid.get_start_state())\n return visited\n\n def test_environment(self):\n env = Mdp(self.mdp3)\n self.assertEqual(env.get_current_state(), (3, 3))\n next_state, reward = env.perform_action(Direction.NORTH)\n self.assertEqual(next_state, (3, 2))\n self.assertEqual(reward, -0.01)\n self.assertEqual(env.get_current_state(), next_state)\n self.assertFalse(env.is_done())\n env.reset()\n self.assertEqual(env.get_current_state(), (3, 3))\n self.assertFalse(env.is_done())\n next_state, reward = env.perform_action(Direction.WEST)\n self.assertEqual(next_state, (2, 3))\n self.assertEqual(reward, -0.01)\n self.assertEqual(env.get_current_state(), next_state)\n self.assertFalse(env.is_done())\n next_state, reward = env.perform_action(Direction.NORTH)\n self.assertEqual(next_state, (2, 2))\n self.assertEqual(reward, -0.01)\n self.assertEqual(env.get_current_state(), next_state)\n self.assertFalse(env.is_done())\n next_state, reward = env.perform_action(Direction.STAY)\n self.assertEqual(next_state, (2, 2))\n self.assertEqual(reward, 1)\n self.assertEqual(env.get_current_state(), next_state)\n self.assertFalse(env.is_done())\n env.reset()\n self.assertFalse(env.is_done())\n self.assertEqual(env.get_current_state(), (3, 3))\n\n def test_numpy_conversion(self):\n def check_mdp(mdp):\n new_mdp = GridworldMdp.from_numpy_input(*mdp.convert_to_numpy_input())\n self.assertEqual(new_mdp.height, mdp.height)\n 
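# the remaining fields serialized by convert_to_numpy_input should survive the round-trip too:\n            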
self.assertEqual(new_mdp.width, mdp.width)\n self.assertEqual(new_mdp.walls, mdp.walls)\n self.assertEqual(new_mdp.rewards, mdp.rewards)\n self.assertEqual(new_mdp.start_state, mdp.start_state)\n\n check_mdp(self.mdp1)\n check_mdp(self.mdp2)\n check_mdp(self.mdp3)\n\n def test_random_gridworld_generation(self):\n set_seeds(314159)\n mdp = GridworldMdp.generate_random(8, 8, 0, 0)\n self.assertEqual(mdp.height, 8)\n self.assertEqual(mdp.width, 8)\n mdp_string = str(mdp)\n self.assertEqual(mdp_string.count('X'), 28)\n self.assertEqual(mdp_string.count(' '), 34)\n self.assertEqual(mdp_string.count('A'), 1)\n self.assertEqual(mdp_string.count('3'), 1)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"HumanCompatibleAI/learning_biases","sub_path":"gridworld_test.py","file_name":"gridworld_test.py","file_ext":"py","file_size_in_byte":10758,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"77"} +{"seq_id":"70098928570","text":"from Components.config import config, ConfigYesNo\nfrom Components.Converter.Converter import Converter\nfrom Components.Element import cached\nfrom enigma import eServiceCenter, eServiceReference, iServiceInformation\nfrom xml.etree.cElementTree import parse\nfrom Poll import Poll\nfrom os import system\nfrom Components.Network import iNetwork\nfrom os import environ\nimport gettext\nfrom Components.Language import language\nfrom Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_LANGUAGE\nimport threading\nimport re\n\nlang = language.getLanguage()\nenviron['LANGUAGE'] = lang[:2]\ngettext.bindtextdomain('enigma2', resolveFilename(SCOPE_LANGUAGE))\ngettext.textdomain('enigma2')\ngettext.bindtextdomain('nBmExtendedServiceInfo', '%s%s' % ('/usr/lib/enigma2/python/Components/', 'Converter/locale/'))\n\ndef _(txt):\n t = gettext.dgettext('nBmExtendedServiceInfo', txt)\n if t == txt:\n t = gettext.gettext(txt)\n return t\n\nconfig.misc.spazeinfobartp = ConfigYesNo(default=True)\nconfig.misc.spazeinfobarecm = ConfigYesNo(default=True)\nconfig.misc.spazeinfobarnum = ConfigYesNo(default=True)\nconfig.misc.spazeinfobarinet = ConfigYesNo(default=True)\nconfig.misc.spazeinfobarrec = ConfigYesNo(default=True)\n\nclass segundoplano(threading.Thread):\n\n def __init__(self, parametro, six):\n threading.Thread.__init__(self)\n self.parametro = parametro\n self.six = six\n\n def run(self):\n self.parametro.subcargasat(self.six)\n\ndef haytp():\n ret = True\n try:\n ret = config.misc.spazeinfobartp.value\n except:\n pass\n\n return ret\n\ndef hayecm():\n ret = True\n try:\n ret = config.misc.spazeinfobarecm.value\n except:\n pass\n\n return ret\n\ndef haynum():\n ret = True\n try:\n ret = config.misc.spazeinfobarnum.value\n except:\n pass\n\n return ret\n\ndef hayinet():\n ret = True\n try:\n ret = config.misc.spazeinfobarinet.value\n except:\n pass\n\n return ret\n\ndef devStr(cadena, inicio = ':', fin = None):\n try:\n if cadena == None:\n return ''\n if not inicio == None:\n if inicio not in cadena:\n return cadena\n str = cadena.split(inicio)[1]\n else:\n str = cadena\n if not fin == None:\n if fin in cadena:\n str = str.split(fin)[0]\n return str.strip()\n except:\n return ''\n\n return\n\ndef _commafy(s):\n r = []\n for i, c in enumerate(reversed(s)):\n if i and not i % 3:\n r.insert(0, '#')\n r.insert(0, c)\n\n return ''.join(r)\n\ndef FormatWithCommas(value, sepmil = '.', sepdec = ',', ndecimales = 0, cmoneda = ''):\n re_digits_nondigits = re.compile('\\\\d+|\\\\D+')\n format = '%.' 
+ str(ndecimales) + 'f' + cmoneda\n if value == None:\n return ''\n elif str(value) == '':\n return ''\n else:\n cvalue = str(value)\n try:\n fvalue = float(value)\n except:\n return value\n\n try:\n parts = re_digits_nondigits.findall(format % (fvalue,))\n for i in xrange(len(parts)):\n s = parts[i]\n if s.isdigit():\n parts[i] = _commafy(s)\n break\n\n return ''.join(parts).replace('.', sepdec).replace('#', sepmil)\n except:\n return value\n\n return\n\ndef getFEData(frontendDataOrg):\n from Tools.Transponder import ConvertToHumanReadable\n try:\n if frontendDataOrg and len(frontendDataOrg):\n frontendData = ConvertToHumanReadable(frontendDataOrg)\n if frontendDataOrg['tuner_type'] == 'DVB-S':\n return str(FormatWithCommas(int(frontendData['frequency']) / 1000)) + ' MHz ' + str(FormatWithCommas(int(frontendData['symbol_rate']) / 1000)) + ' ' + str(frontendData['polarization'][0:1]) + ' ' + str(frontendData['fec_inner'])\n if frontendDataOrg['tuner_type'] == 'DVB-C':\n return str(frontendData['frequency']) + ' ' + str(frontendData['symbol_rate'])\n if frontendDataOrg['tuner_type'] == 'DVB-T':\n return str(FormatWithCommas(ajustafr(int(frontendData['frequency'])))) + ' Khz (UHF ' + str(devchfr(frontendData['frequency'])) + ') ' + str(frontendData['bandwidth'])\n return ' '\n except:\n return ' '\n\ndef ajustafr(frecu):\n return int(round(float(frecu) / 1000000, 0)) * 1000\n\ndef devchfr(frecu):\n ret = 'NA'\n arrfecs = [(21, 474),\n (22, 482),\n (23, 490),\n (24, 498),\n (25, 506),\n (26, 514),\n (27, 522),\n (28, 530),\n (29, 538),\n (30, 546),\n (31, 554),\n (32, 562),\n (33, 570),\n (34, 578),\n (35, 586),\n (36, 594),\n (37, 602),\n (38, 610),\n (39, 618),\n (40, 626),\n (41, 634),\n (42, 642),\n (43, 650),\n (44, 658),\n (45, 666),\n (46, 674),\n (47, 682),\n (48, 690),\n (49, 698),\n (50, 706),\n (51, 714),\n (52, 722),\n (53, 730),\n (54, 738),\n (55, 746),\n (56, 754),\n (57, 762),\n (58, 770),\n (59, 778),\n (60, 786),\n (61, 794),\n (62, 802),\n (63, 810),\n (64, 818),\n (65, 826),\n (66, 834),\n (67, 842),\n (68, 850),\n (69, 858)]\n nfrecu = ajustafr(frecu) / 1000\n for ele in arrfecs:\n if ele[1] == nfrecu:\n ret = ele[0]\n return ret\n\n return ret\n\nclass nBmExtendedServiceInfo(Poll, Converter, object):\n SERVICENAME = 0\n SERVICENUMBER = 1\n ORBITALPOSITION = 2\n SATNAME = 3\n PROVIDER = 4\n FROMCONFIG = 5\n ALL = 6\n ECMINFO = 7\n CAMNAME = 8\n INETCONECTION = 9\n NETCONECTION = 10\n\n def __init__(self, type):\n Converter.__init__(self, type)\n Poll.__init__(self)\n self.poll_interval = 10000\n self.poll_enabled = True\n self.tv_list = []\n self.seg_plano = None\n self.radio_list = []\n self.satNames = {}\n self.systemCaids = {'06': 'irdeto',\n '01': 'seca',\n '18': 'nagra',\n '05': 'via',\n '0B': 'conax',\n '17': 'betacrypt',\n '0D': 'crypto',\n '4A': 'dreamcrypt',\n '09': 'nds'}\n if type == 'ServiceName':\n self.type = self.SERVICENAME\n elif type == 'Number':\n self.type = self.SERVICENUMBER\n elif type == 'TunerInfo':\n self.type = self.ORBITALPOSITION\n elif type == 'SatName':\n self.type = self.SATNAME\n elif type == 'Provider':\n self.type = self.PROVIDER\n elif type == 'Config':\n self.type = self.FROMCONFIG\n elif type == 'InetConection':\n self.poll_interval = 5000\n self.type = self.INETCONECTION\n elif type == 'NetConection':\n self.poll_interval = 5000\n self.type = self.NETCONECTION\n elif type == 'EcmInfo':\n self.poll_interval = 3000\n self.type = 7\n elif type == 'CamName':\n self.type = 8\n else:\n self.type = self.ALL\n return\n\n @cached\n def 
getBoolean(self):\n ret = False\n service = self.source.service\n info = service and service.info()\n if not info:\n return False\n if self.type == self.INETCONECTION:\n if not hayinet():\n ret = False\n else:\n try:\n f = open('/tmp/testinet.txt', 'r')\n texto = f.read().replace('\\n', '')\n f.close()\n if '1 packets transmitted, 1 packets received' in texto:\n ret = True\n except:\n pass\n\n try:\n system('ping -q -c 1 -s 6 -w 2 www.google.com >/tmp/testinet.txt &')\n except:\n pass\n\n elif self.type == self.NETCONECTION:\n try:\n adapters = [ (iNetwork.getFriendlyAdapterName(x), x) for x in iNetwork.getAdapterList() ]\n except:\n adapters = False\n\n if not adapters:\n ret = False\n else:\n puerta = '0.0.0.0'\n for x in adapters:\n if iNetwork.getAdapterAttribute(x[1], 'up') is True:\n puerta = str(iNetwork.getAdapterAttribute(x[1], 'gateway')).replace(',', '.').replace('[', '').replace(' ', '').replace(']', '')\n break\n\n if puerta == '0.0.0.0':\n ret = False\n else:\n try:\n f = open('/tmp/testnet.txt', 'r')\n texto = f.read().replace('\\n', '')\n f.close()\n if '1 packets transmitted, 1 packets received' in texto:\n ret = True\n except:\n pass\n\n try:\n system('ping -q -c 1 -s 6 -w 2 ' + puerta + ' >/tmp/testnet.txt &')\n except:\n pass\n\n return ret\n\n boolean = property(getBoolean)\n\n @cached\n def getText(self):\n if len(self.tv_list) == 0:\n if haytp():\n self.cargasat(self.type)\n elif haynum():\n self.cargasat(self.type, False)\n service = self.source.service\n info = service and service.info()\n if not info:\n return ''\n else:\n text = ''\n orbital = ''\n number = ''\n satName = ''\n name = info.getName().replace('\\xc2\\x86', '').replace('\\xc2\\x87', '')\n if self.type == self.SERVICENAME:\n text = name\n elif self.type == self.CAMNAME:\n text = ''\n if hayecm():\n try:\n f = open('/tmp/.cam.info', 'r')\n text = text + f.read().replace('\\n', '')\n f.close()\n except:\n pass\n\n try:\n f = open('/etc/.ActiveCamd', 'r')\n text = text + f.read().replace('\\n', '')\n f.close()\n if text == 'no':\n text = _('No CAM')\n except:\n pass\n\n if text == '':\n text == _('No CAM')\n elif self.type == self.SERVICENUMBER:\n if haynum():\n number = self.getServiceNumber(name, info.getInfoString(iServiceInformation.sServiceref))\n text = number\n elif self.type == self.ORBITALPOSITION:\n if haytp():\n orbital = self.getOrbitalPosition(info)\n text = orbital\n elif self.type == self.SATNAME:\n if haytp():\n orbital = self.getOrbitalPosition(info)\n satName = self.satNames.get(orbital, orbital)\n text = satName\n elif self.type == self.PROVIDER:\n text = info.getInfoString(iServiceInformation.sProvider)\n elif self.type == self.FROMCONFIG:\n if haytp():\n orbital = self.getOrbitalPosition(info)\n satName = self.satNames.get(orbital, orbital)\n number = self.getServiceNumber(name, info.getInfoString(iServiceInformation.sServiceref))\n if config.plugins.ExtendedServiceInfo.showServiceNumber.value == True and number != '':\n text = '%s. 
%s' % (number, name)\n else:\n text = name\n if config.plugins.ExtendedServiceInfo.showOrbitalPosition.value == True and orbital != '':\n if config.plugins.ExtendedServiceInfo.orbitalPositionType.value == 'name':\n text = '%s (%s)' % (text, satName)\n else:\n text = '%s (%s)' % (text, orbital)\n elif self.type == 7:\n text = ''\n if hayecm():\n ecmInfoString = ' '\n using = ''\n address = ''\n hops = ''\n ecmTime = ''\n sistema = ''\n try:\n f = open('/tmp/ecm.info', 'r')\n content = f.read()\n f.close()\n except:\n content = ''\n\n contentInfo = content.split('\\n')\n for line in contentInfo:\n if line.startswith('system:'):\n esisd = devStr(line)\n if esisd.replace('\\n', '').strip() == 'FTA':\n ecmInfoString = _('FTA channel')\n break\n elif line.startswith('caid:'):\n caid = devStr(line)\n if caid.__contains__('x'):\n idx = caid.index('x')\n caid = caid[idx + 1:]\n if len(caid) == 3:\n caid = '0%s' % caid\n caid = caid[:2]\n caid = caid.upper()\n sistema = caid\n if self.systemCaids.has_key(caid):\n sistema = self.systemCaids.get(caid)\n elif line.startswith('address:'):\n address2 = line.replace('address:', '')\n address = devStr(address2, None, ':')\n porto = devStr(address2)\n if porto == address or porto == '':\n porto = ''\n else:\n porto = ':' + porto\n if len(address) > 23:\n address = address[:20] + '...'\n address = address + porto\n elif line.startswith('hops:'):\n hops = ' (' + devStr(line) + ' ' + _('hops') + ')'\n elif line.startswith('ecm time:'):\n ecmTime = devStr(line)\n if len(ecmTime) > 4:\n ecmTime = ecmTime[0:4]\n ecmTime = ecmTime + ' ' + _('secs.')\n\n if sistema != '':\n ecmInfoString = '%s' % sistema\n if using != '':\n ecmInfoString = '%s :: %s' % (ecmInfoString, using)\n if address != '':\n ecmInfoString = '%s :: %s' % (ecmInfoString, address)\n if ecmTime != '':\n ecmInfoString = '%s :: %s' % (ecmInfoString, ecmTime + hops)\n text = ecmInfoString\n elif haytp():\n number = self.getServiceNumber(name, info.getInfoString(iServiceInformation.sServiceref))\n orbital = self.getOrbitalPosition(info)\n if number == '':\n text = name\n else:\n text = '%s. 
%s' % (number, name)\n if orbital != '':\n text = '%s (%s)' % (text, orbital)\n return str(text)\n\n text = property(getText)\n\n def getListFromRef(self, ref):\n list = []\n try:\n serviceHandler = eServiceCenter.getInstance()\n services = serviceHandler.list(ref)\n bouquets = services and services.getContent('SN', True)\n for bouquet in bouquets:\n services = serviceHandler.list(eServiceReference(bouquet[0]))\n channels = services and services.getContent('SN', True)\n for channel in channels:\n if not channel[0].startswith('1:64:'):\n list.append(channel[1].replace('\\xc2\\x86', '').replace('\\xc2\\x87', ''))\n\n except:\n pass\n\n return list\n\n def getServiceNumber(self, name, ref):\n try:\n list = []\n if ref.startswith('1:0:2'):\n list = self.radio_list\n elif ref.startswith('1:0:1'):\n list = self.tv_list\n number = ''\n if name in list:\n for idx in range(1, len(list)):\n if name == list[idx - 1]:\n number = str(idx)\n break\n\n return number\n except:\n return ''\n\n def getOrbitalPosition(self, info):\n cret = ''\n try:\n transponderData = info.getInfoObject(iServiceInformation.sTransponderData)\n orbital = 0\n if transponderData is not None:\n if isinstance(transponderData, float):\n return ''\n if transponderData.has_key('tuner_type'):\n if transponderData['tuner_type'] == 'DVB-S' or transponderData['tuner_type'] == 'DVB-S2':\n orbital = transponderData['orbital_position']\n orbital = int(orbital)\n if orbital > 1800:\n orbital = str(float(3600 - orbital) / 10.0) + 'W'\n else:\n orbital = str(float(orbital) / 10.0) + 'E'\n else:\n orbital = '0'\n if not str(orbital) == '0':\n satName = self.satNames.get(orbital, orbital)\n if not satName == None and not satName == '':\n satName = devStr(satName, None, '(')\n cret = cret + devStr(satName, None, '/')\n if str(orbital) not in satName:\n cret = cret + ' (' + str(orbital) + ')'\n cret = cret + ' ' + getFEData(transponderData)\n return cret\n except:\n return '---x---'\n\n return\n\n def cargasat(self, tipo, sixml = True):\n if tipo < 1 or tipo > 6:\n return\n else:\n if tipo == 1:\n sixml = False\n if not self.seg_plano == None:\n return\n try:\n self.seg_plano._Thread__stop()\n except:\n pass\n\n self.seg_plano = None\n self.seg_plano = segundoplano(self, sixml)\n self.seg_plano.start()\n return\n\n def subcargasat(self, sixml = True):\n if sixml:\n try:\n satXml = parse('/etc/tuxbox/satellites.xml').getroot()\n if satXml is not None:\n for sat in satXml.findall('sat'):\n name = sat.get('name') or None\n position = sat.get('position') or None\n if name is not None and position is not None:\n position = '%s.%s' % (position[:-1], position[-1:])\n if position.startswith('-'):\n position = '%sW' % position[1:]\n else:\n position = '%sE' % position\n if position.startswith('.'):\n position = '0%s' % position\n self.satNames[position] = name\n\n except:\n pass\n\n try:\n self.tv_list = self.getListFromRef(eServiceReference('1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 195) || (type == 25) FROM BOUQUET \"bouquets.tv\" ORDER BY bouquet'))\n self.radio_list = self.getListFromRef(eServiceReference('1:7:2:0:0:0:0:0:0:0:(type == 2) FROM BOUQUET \"bouquets.radio\" ORDER BY bouquet'))\n except:\n pass\n\n self.seg_plano = None\n return\n\n def changed(self, what):\n if what[0] != self.CHANGED_SPECIFIC or what[1] == self.type:\n Converter.changed(self, 
what)\n","repo_name":"OpenPE/fhd4-skin","sub_path":"usr/lib/enigma2/python/Components/Converter/nBmExtendedServiceInfo.py","file_name":"nBmExtendedServiceInfo.py","file_ext":"py","file_size_in_byte":20322,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"21012076282","text":"import pickle\nimport os\n\ndef separate_repeats(seqs, overlap_thresh):\n reads_repeat = {}\n reads_foldback = {}\n reads_distal = {}\n\n for k, v in seqs.items():\n has_repeat = False\n has_foldback = False\n has_distal = False\n\n for i in range(1, len(v)):\n if v[i][3] != v[i-1][3]:\n has_distal = True\n continue\n overlap = (min(v[i-1][5], v[i][5]) - max(v[i-1][4], v[i][4])) / min(v[i][5] - v[i][4], v[i-1][5] - v[i-1][4])\n if overlap > overlap_thresh:\n if v[i][2] != v[i-1][2]:\n has_foldback = True\n else:\n has_repeat = True\n else:\n has_distal = True\n\n if has_repeat:\n reads_repeat[k] = v\n if has_foldback:\n reads_foldback[k] = v\n if has_distal:\n reads_distal[k] = v\n\n return reads_repeat, reads_foldback, reads_distal\n\ndef filter_breakpoints(in_path, out_path, overlap_thresh):\n with open(in_path, \"rb\") as in_file:\n seqs, lens = pickle.load(in_file)\n\n reads_repeat, reads_foldback, reads_distal = separate_repeats(seqs, overlap_thresh)\n\n for k, v in reads_distal.items(): ####\n print(k, v) ####\n print(len(reads_repeat), len(reads_foldback), len(reads_distal))\n\n res = reads_repeat, reads_foldback, reads_distal, lens\n with open(out_path, \"wb\") as out_file:\n pickle.dump(res, out_file)\n\nif __name__ == \"__main__\":\n data_dir = \"/oak/stanford/groups/wjg/atwang/ecdna/data\"\n in_path = os.path.join(data_dir, \"COLO320DM_gDNA_nanopore_guppy_4.4_splits.pickle\")\n out_path = os.path.join(data_dir, \"COLO320DM_gDNA_nanopore_guppy_4.4_cats.pickle\")\n\n overlap_thresh = 0.5\n filter_breakpoints(in_path, out_path, overlap_thresh)","repo_name":"austintwang/ecDNA","sub_path":"filter_breakpoints.py","file_name":"filter_breakpoints.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"26707680421","text":"import espressopp\n\n# try to run this script with force_capping switched on and off\nforceCapping = True\n\n# specify number of particles\nnum_particles = 200\nrho = 0.85\nL = pow(num_particles/rho, 1.0/3.0)\n\n# setup random LennardJones system\n# this system is likely to explode on integration, because particles can strongly overlap\nsystem, integrator = espressopp.standard_system.LennardJones(num_particles, box=(L, L, L), temperature=1.0)\n\n# choose a smaller timestep\nintegrator.dt = 0.0001\n\nif forceCapping:\n max_force = 100000.0\n # define force capping extension\n capForce = espressopp.integrator.CapForce(system, max_force)\n # and add it to the integrator\n integrator.addExtension(capForce)\n\nespressopp.tools.analyse.info(system, integrator)\n\nsock = espressopp.tools.vmd.connect(system)\nfor i in range(1000):\n # make 10 Velocity-Verlet integration steps\n integrator.run(10)\n # print system information\n espressopp.tools.analyse.info(system, integrator)\n # update postions in VMD\n espressopp.tools.vmd.imd_positions(system, sock)\n #switch off force capping after some time\n if forceCapping and i>100:\n capForce.disconnect()\n forceCapping = False\n print(\"switching off force 
capping\")\n","repo_name":"espressopp/espressopp","sub_path":"examples/force_capping/force_capping.py","file_name":"force_capping.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"77"} +{"seq_id":"17874820742","text":"matrix = []\r\nfor _ in range(5):\r\n a = list(input())\r\n matrix.append(a)\r\nmaxx = 0\r\nfor m in matrix:\r\n if maxx < len(m):\r\n maxx = len(m)\r\n\r\nfor i in range(maxx):\r\n for j in range(5):\r\n try:\r\n print(matrix[j][i], end=\"\")\r\n except:\r\n continue","repo_name":"mungjimangji/algorithm","sub_path":"백준/Bronze/10798. 세로읽기/세로읽기.py","file_name":"세로읽기.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7737983407","text":"import numpy as np\n# Timothy Tyree\n# 12.4.2020\n\n#Ready-Made Example\n\nfrom queue import PriorityQueue\n\nq = PriorityQueue()\n\nq.put((4, 'Read'))\nq.put((2, 'Play'))\nq.put((5, 'Write'))\nq.put((1, 'Code'))\nq.put((3, 'Study'))\n\nwhile not q.empty():\n next_item = q.get()\n print(next_item)\n\n# the following parallel priority queue is very fast, but it makes use of bucket heaps, which I haven't found a ready-made soln for...\n# \"A parallel priority queue with fast updates for GPU architectures\" by John Iacono et al. (2019)\n# `parallel priority queue.pdf` <>\n","repo_name":"timtyree/avi","sub_path":"nb/lib/controller/priority_queue.py","file_name":"priority_queue.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"11181933850","text":"from fastapi import FastAPI\n\nfrom src.main.shared.amqp.amqp_consumer import AmqpConsumer\nfrom src.main.shared.amqp.amqp_publisher import AmqpPublisher\nfrom src.main.post.settings import settings\nfrom src.main.post.util import handle_user_registration, handle_vote_casted, handle_user_deleted, \\\n handle_post_awarded\n\n\nclass PostService(FastAPI):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.user_registered_amqp_consumer = None\n self.post_vote_casted_amqp_consumer = None\n self.user_deleted_amqp_consumer = None\n self.post_awarded_amqp_consumer = None\n self.initialize_amqp_consumers()\n\n self.post_created_amqp_publisher = None\n self.initialize_amqp_publishers()\n\n def initialize_amqp_consumers(self):\n self.user_registered_amqp_consumer = AmqpConsumer(\n settings.AMQP_URL,\n exchange_name=settings.AMQP_USER_REGISTERED_EXCHANGE_NAME,\n queue_name=settings.AMQP_USER_REGISTERED_QUEUE_NAME,\n incoming_message_handler=handle_user_registration,\n )\n self.post_vote_casted_amqp_consumer = AmqpConsumer(\n settings.AMQP_URL,\n exchange_name=settings.AMQP_POST_VOTE_CASTED_EXCHANGE_NAME,\n queue_name=settings.AMQP_POST_VOTE_CASTED_QUEUE_NAME,\n incoming_message_handler=handle_vote_casted,\n )\n self.user_deleted_amqp_consumer = AmqpConsumer(\n settings.AMQP_URL,\n exchange_name=settings.AMQP_USER_DELETED_EXCHANGE_NAME,\n queue_name=settings.AMQP_USER_DELETED_QUEUE_NAME,\n incoming_message_handler=handle_user_deleted,\n )\n self.post_awarded_amqp_consumer = AmqpConsumer(\n settings.AMQP_URL,\n exchange_name=settings.AMQP_POST_AWARDED_EXCHANGE_NAME,\n queue_name=settings.AMQP_POST_AWARDED_QUEUE_NAME,\n incoming_message_handler=handle_post_awarded,\n )\n\n def initialize_amqp_publishers(self):\n self.post_created_amqp_publisher = AmqpPublisher(\n settings.AMQP_URL,\n 
exchange_name=settings.AMQP_POST_CREATED_EXCHANGE_NAME,\n )\n","repo_name":"stoyanK7/not-reddit","sub_path":"api/src/main/post/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8261521969","text":"import base64\nimport hashlib\nimport logging\n\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import ec\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\nfrom cryptojwt.utils import b64e\n\nlogger = logging.getLogger(__name__)\n\n\ndef import_public_key_from_pem_file(filename):\n \"\"\"\n Read a public RSA key from a PEM file.\n\n :param filename: The name of the file\n :param passphrase: A pass phrase to use to unpack the PEM file.\n :return: A public key instance\n \"\"\"\n with open(filename, \"rb\") as key_file:\n public_key = serialization.load_pem_public_key(key_file.read(), backend=default_backend())\n return public_key\n\n\ndef import_private_key_from_pem_file(filename, passphrase=None):\n \"\"\"\n Read a private RSA key from a PEM file.\n\n :param filename: The name of the file\n :param passphrase: A pass phrase to use to unpack the PEM file.\n :return: A private key instance\n \"\"\"\n with open(filename, \"rb\") as key_file:\n private_key = serialization.load_pem_private_key(\n key_file.read(), password=passphrase, backend=default_backend()\n )\n return private_key\n\n\nPREFIX = \"-----BEGIN CERTIFICATE-----\"\nPOSTFIX = \"-----END CERTIFICATE-----\"\n\n\ndef import_public_key_from_pem_data(pem_data):\n \"\"\"\n Extract an RSA key from a PEM-encoded X.509 certificate\n\n :param pem_data: RSA key encoded in standard form\n :return: rsa.RSAPublicKey instance\n \"\"\"\n if not pem_data.startswith(PREFIX):\n pem_data = bytes(\"{}\\n{}\\n{}\".format(PREFIX, pem_data, POSTFIX), \"utf-8\")\n else:\n pem_data = bytes(pem_data, \"utf-8\")\n cert = x509.load_pem_x509_certificate(pem_data, default_backend())\n return cert.public_key()\n\n\ndef import_public_key_from_cert_file(filename):\n \"\"\"\n Read a public key from a certificate file.\n\n :param filename: The name of the file\n :return: A public key instance\n \"\"\"\n with open(filename, \"rb\") as key_file:\n cert = x509.load_pem_x509_certificate(key_file.read(), backend=default_backend())\n return cert.public_key()\n\n\ndef der_cert(der_data):\n \"\"\"\n Load a DER encoded certificate\n\n :param der_data: DER-encoded certificate\n :return: A cryptography.x509.certificate instance\n \"\"\"\n if isinstance(der_data, str):\n der_data = bytes(der_data, \"utf-8\")\n return x509.load_der_x509_certificate(der_data, default_backend())\n\n\ndef load_x509_cert(url, httpc, spec2key, **get_args):\n \"\"\"\n Get and transform a X509 cert into a key.\n\n :param url: Where the X509 cert can be found\n :param httpc: HTTP client to use for fetching\n :param spec2key: A dictionary over keys already seen\n :param get_args: Extra key word arguments to the HTTP GET request\n :return: List of 2-tuples (keytype, key)\n \"\"\"\n try:\n r = httpc(\"GET\", url, allow_redirects=True, **get_args)\n if r.status_code == 200:\n cert = str(r.text)\n try:\n public_key = spec2key[cert] # If I've already seen it\n except KeyError:\n public_key = import_public_key_from_pem_data(cert)\n spec2key[cert] = public_key\n\n if isinstance(public_key, rsa.RSAPublicKey):\n return {\"rsa\": 
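# the dict key names the key family: 'rsa' here, 'ec' in the branch below\n                    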
public_key}\n            elif isinstance(public_key, ec.EllipticCurvePublicKey):\n                return {\"ec\": public_key}\n        else:  # non-200 response; this else pairs with the status-code check above\n            raise Exception(\"HTTP Get error: %s\" % r.status_code)\n    except Exception as err:  # not an RSA key\n        logger.warning(\"Can't load key: %s\" % err)\n        return []\n\n\ndef x5t_calculation(cert):\n    \"\"\"\n    base64url-encoded SHA-1 thumbprint (a.k.a. digest) of the DER\n    encoding of an X.509 certificate.\n\n    :param cert: DER encoded X.509 certificate\n    :return: x5t value\n    \"\"\"\n    if isinstance(cert, str):\n        der_cert = base64.b64decode(cert.encode(\"ascii\"))\n    else:\n        der_cert = base64.b64decode(cert)\n\n    return b64e(hashlib.sha1(der_cert).digest())\n","repo_name":"IdentityPython/JWTConnect-Python-CryptoJWT","sub_path":"src/cryptojwt/jwk/x509.py","file_name":"x509.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"}
{"seq_id":"9024451628","text":"# search for 'requests - HTTP for Humans' on the net\r\nimport requests\r\n\r\n# r = requests.get('https://financialmodelingprep.com/api/v3/income-statement/AAPL?period=quarter&limit=400&apikey=YOUR_API_KEY')\r\n# print(r.text)  # this prints the content of r, i.e. the response body of the API/URL above\r\n# print(r.status_code)  # google 'http status code'\r\n\r\n\r\n\r\n# --------------------------POST REQUEST---------------------------#\r\n# NOTE - passwords and the like are never passed in the URL; they are sent in the POST request body\r\n# url = \"www.something.com\"  # chosen at random, DO NOT RUN THIS PART\r\n# data = {\r\n#     'p1': 2, 'p2': 9\r\n# }\r\n#\r\n# r2 = requests.post(url=url, data=data)\r\n\r\n# -------------------------------------TASK-------------------------------------#\r\n# take any free API and send a POST request\r\nurl = 'https://reqbin.com/echo/post/form'\r\ndata = {\"userid\": 123, 'pincode': 300675}\r\nr = requests.post(url, data)\r\nprint(r.text)\r\nprint(r.status_code)\r\n\r\n\r\n\r\n","repo_name":"Juilee27/Python","sub_path":"PyCharm/REQUESTSSmodule.py","file_name":"REQUESTSSmodule.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"71167274168","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\nfrom .models import Todo\n\ndef index(request):\n    todos = Todo.objects.all()[:10]\n\n    context = {\n        'todos': todos\n    }\n    return render(request, 'index.html', context)\n\ndef details(request, id):\n    todo = Todo.objects.get(id=id)\n\n    context = {\n        'todo': todo\n    }\n    return render(request, 'details.html', context)\n\ndef add(request):\n    if request.method == 'POST':\n        title = request.POST['title']\n        text = request.POST['text']\n        completed = 'completed' in request.POST  # the checkbox key is absent from POST when unchecked, so no try/except is needed\n\n        todo = Todo(title=title, text=text, completed=completed)\n        todo.save()\n\n        return redirect('/todos')\n    else:\n        return render(request, 'add.html')\n\ndef mark_complete(request):\n    id = request.POST['id']\n    value = request.POST['value']\n    Todo.objects.filter(id=id).update(completed=value)\n    return redirect('/todos/details/' + id)","repo_name":"kixes/django-todolist","sub_path":"todos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
{"seq_id":"36580456554","text":"import pygame\r\nimport random\r\nimport math\r\nfrom pygame import mixer\r\n\r\n# Initialize PyGame\r\npygame.init()\r\n\r\n# Define screen resolution\r\n
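# (hedged note) several magic numbers below assume this 800x600 window and\r\n# 64-pixel sprites: e.g. 736 = 800 - 64 is the right-edge clamp used for the\r\n# player and the enemies.\r\ndisplay = 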
pygame.display.set_mode((800, 600))\r\n\r\n# Title, icon and background\r\npygame.display.set_caption('Monster Invasion by Franco Sparn')\r\nicon = pygame.image.load('static/img/icon.png')\r\npygame.display.set_icon(icon)\r\nbackground = pygame.image.load('static/img/background.png')\r\n\r\n# Game music\r\nmixer.music.load('static/sounds/main.mp3')\r\nmixer.music.set_volume(0.1)\r\nmixer.music.play(-1)\r\n\r\n# Player variables\r\nimg_player = pygame.image.load('static/img/dragon.png')\r\nplayer_x = 368\r\nplayer_y = 530\r\nplayer_x_change = 0\r\n\r\n# Enemy variables\r\nimg_enemy = []\r\nenemy_x = []\r\nenemy_y = []\r\nenemy_x_change = []\r\nenemy_y_change = []\r\nenemy_amount = 5\r\n\r\nfor e in range(enemy_amount):\r\n img_enemy.append(pygame.image.load('static/img/enemy.png'))\r\n enemy_x.append(random.randint(0, 736))\r\n enemy_y.append(random.randint(0, 300))\r\n enemy_x_change.append(2.5)\r\n enemy_y_change.append(50)\r\n\r\n# Bullet variables\r\nimg_bullet = pygame.image.load('static/img/bullet.png')\r\nbullet_x = 0\r\nbullet_y = 530\r\nbullet_x_change = 0\r\nbullet_y_change = 5\r\nbullet_visible = False\r\n\r\n# Score variable\r\nscore = 0\r\nfont = pygame.font.Font('static/fonts/VCR_OSD_MONO_1.001.ttf', 28)\r\ntext_x = 10\r\ntext_y = 10\r\n\r\n# Endgame text\r\nend_font = pygame.font.Font('static/fonts/VCR_OSD_MONO_1.001.ttf', 48)\r\n\r\n\r\n# End text function\r\ndef end_text():\r\n end_message = end_font.render('Game Over', True, (0, 0, 0))\r\n display.blit(end_message, (290, 200))\r\n \r\n\r\n# Show score function\r\ndef show_score(x, y):\r\n text = font.render(f'Score:{score}', True, (255, 255, 255))\r\n display.blit(text, (x, y))\r\n\r\n\r\n# Enemy function\r\ndef enemy(x, y, enem):\r\n display.blit(img_enemy[enem], (x, y))\r\n\r\n\r\n# Player function\r\ndef player(x, y):\r\n display.blit(img_player, (x, y))\r\n\r\n\r\n# Bullet function\r\ndef shooting_bullet(x, y):\r\n global bullet_visible\r\n bullet_visible = True\r\n display.blit(img_bullet, (x + 16, y + 10))\r\n\r\n\r\n# Detect collisions function\r\ndef collision_ok(x_1, y_1, x_2, y_2):\r\n distance = math.sqrt((math.pow(x_2 - x_1, 2) + math.pow(y_2 - y_1, 2)))\r\n if distance < 27:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n# Game loop\r\nrun = True\r\n\r\nwhile run:\r\n\r\n # Background image\r\n display.blit(background, (0, 0))\r\n\r\n # Iterate events\r\n for event in pygame.event.get():\r\n\r\n # Close event\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n # Event to check if a keys is pressed\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT:\r\n player_x_change = -2\r\n if event.key == pygame.K_RIGHT:\r\n player_x_change = 2\r\n if event.key == pygame.K_SPACE:\r\n bullet_sound = mixer.Sound('static/sounds/shoot.mp3')\r\n bullet_sound.set_volume(0.3)\r\n bullet_sound.play()\r\n if not bullet_visible:\r\n bullet_x = player_x\r\n shooting_bullet(bullet_x, bullet_y)\r\n\r\n # Event that checks if a key was released\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n player_x_change = 0\r\n\r\n # Modify player location\r\n player_x += player_x_change\r\n\r\n # Keep player location\r\n if player_x <= 0:\r\n player_x = 0\r\n elif player_x >= 736:\r\n player_x = 736\r\n\r\n # Modify enemy location\r\n for e in range(enemy_amount):\r\n\r\n # End of the game\r\n if enemy_y[e] > 490:\r\n for k in range(enemy_amount):\r\n enemy_y[k] = 1000\r\n end_text()\r\n break\r\n\r\n enemy_x[e] += enemy_x_change[e]\r\n\r\n # Keep enemy 
location\r\n if enemy_x[e] <= 0:\r\n enemy_x_change[e] = 3.5\r\n enemy_y[e] += enemy_y_change[e]\r\n elif enemy_x[e] >= 736:\r\n enemy_x_change[e] = -3.5\r\n enemy_y[e] += enemy_y_change[e]\r\n\r\n # Collision\r\n collision = collision_ok(enemy_x[e], enemy_y[e], bullet_x, bullet_y)\r\n if collision:\r\n hit_sound = mixer.Sound('static/sounds/hit.mp3')\r\n hit_sound.set_volume(0.3)\r\n hit_sound.play()\r\n bullet_y = 500\r\n bullet_visible = False\r\n score += 1\r\n enemy_x[e] = random.randint(0, 736)\r\n enemy_y[e] = random.randint(0, 300)\r\n\r\n enemy(enemy_x[e], enemy_y[e], e)\r\n\r\n # Bullet motion\r\n if bullet_y <= 0:\r\n bullet_y = 500\r\n bullet_visible = False\r\n\r\n if bullet_visible:\r\n shooting_bullet(bullet_x, bullet_y)\r\n bullet_y -= bullet_y_change\r\n\r\n # Run functions\r\n player(player_x, player_y)\r\n show_score(text_x, text_y)\r\n\r\n # Run update\r\n pygame.display.update()\r\n","repo_name":"francosparn/python_monster_invasion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"70397622969","text":"import logging\nimport requests\nfrom requests.exceptions import Timeout\nfrom ..iot.IOTClient import IOTClient\nfrom .interpreter import Interpreter\nfrom lib.commands import (\n\tDefineWord,\n\tGetNews,\n\tGetWeatherForecast,\n\tGoogleSearch,\n\tPlayYoutubeVideo,\n\tShutdownSystem,\n\tStartOfflineMusic,\n\tStopOfflineMusic,\n\t# StopProgram,\n\tTellAJoke,\n\tTellTime,\n\tWikiSearch,\n\tDefaultCommand\n)\n\nclass RasaInterpreterException(Exception):\n\tpass\n\n\n\nclass RasaInterpreter(Interpreter):\n\tRASA_NLU_SERVER_BASE_URL = \"http://localhost:5005/\"\n\tRASA_NLU_PARSE_URL = f\"{RASA_NLU_SERVER_BASE_URL}model/parse\"\n\tHEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n\tlogger = logging.getLogger(__name__)\n\n\t@staticmethod\n\tdef mapIntentToEvent(intent):\n\t\tevent = DefaultCommand.__name__\n\t\tif intent == \"define_word\":\n\t\t\tevent = DefineWord.__name__\n\t\telif intent == \"play_music\":\n\t\t\tevent = StartOfflineMusic.__name__\n\t\telif intent == \"stop_music\":\n\t\t\tevent = StopOfflineMusic.__name__\n\t\telif intent == \"tell_time\":\n\t\t\tevent = TellTime.__name__\n\t\telif intent == \"tell_joke\":\n\t\t\tevent = TellAJoke.__name__\n\t\telif intent == \"read_news\":\n\t\t\tevent = GetNews.__name__\n\t\telif intent == \"weather_forecast\":\n\t\t\tevent = GetWeatherForecast.__name__\n\t\telif intent == \"shutdown_system\":\n\t\t\tevent = ShutdownSystem.__name__\n\t\telif intent == \"youtube_search\":\n\t\t\tevent = PlayYoutubeVideo.__name__\n\t\telif intent == \"wiki_search\":\n\t\t\tevent = WikiSearch.__name__\n\t\telif intent == \"google_search\":\n\t\t\tevent = GoogleSearch.__name__\n\t\t\n\t\treturn event\n\t\t\n\t\n\t@staticmethod\n\tdef extractEntity(responseJs):\n\t\tentityIndices = list()\n\t\tentities = responseJs[\"entities\"]\n\t\tentityPhrase = None\n\n\t\tif len(entities) > 0:\n\t\t\tfor entity in entities:\n\t\t\t\tentityIndices.append(entity[\"start\"])\n\t\t\t\tentityIndices.append(entity[\"end\"])\n\n\t\t\tentityPhrase = responseJs[\"text\"][min(entityIndices): max(entityIndices)]\n\n\t\treturn entityPhrase\n\n\t@staticmethod\n\tdef processIOTcmd(cmd):\n\t\t# Detect for IOT commands 1st\n\t\tif cmd == \"switch_on_lights\":\n\t\t\tevent = \"lights\"\n\t\t\tdata = \"1\"\n\t\telif cmd == \"switch_off_lights\":\n\t\t\tevent = \"lights\"\n\t\t\tdata = \"0\"\n\t\t\n\t\treturn 
event, data\n\t\n\t@staticmethod\n\tdef isIOTcmd(cmd):\n\t\treturn cmd in IOTClient.ALLOWED_COMMANDS\n\n\t@classmethod\n\tdef process(cls, command):\n\t\tif command == Interpreter.FAILED_TOKEN:\n\t\t\tcls.logger.warn(\"Failed to interpret command. Reverting to default command\")\n\t\t\treturn DefaultCommand.__name__, None\n\t\ttry:\n\t\t\tresponse = requests.post(cls.RASA_NLU_PARSE_URL, \n\t\t\t\t\tjson = { \"text\": command }, \n\t\t\t\t\theaders = cls.HEADERS,\n\t\t\t\t\ttimeout = 0.7)\n\t\t\tcls.logger.info(\"Rasa Server responded\")\n\t\texcept Timeout:\n\t\t\tcls.logger.warn(\"Rasa Server timeout\")\n\t\t\traise RasaInterpreterException(\"Server took too long to respond\")\n\t\t\t\n\t\tif response.status_code == 200:\n\t\t\tjs = response.json()\n\t\t\tintent = js[\"intent\"][\"name\"]\n\t\t\tcls.logger.info(f\"Detected intent: {intent}\")\n\n\t\t\tif RasaInterpreter.isIOTcmd(intent):\n\t\t\t\treturn RasaInterpreter.processIOTcmd(intent)\n\t\t\telse:\n\t\t\t\tevent = cls.mapIntentToEvent(intent)\n\t\t\t\tdata = cls.extractEntity(js)\n\n\t\t\t\tcls.logger.info(f\"Event, data: {event}, {data}\")\n\t\t\t\treturn event, data\n\n\t\telse:\n\t\t\traise RasaInterpreterException(f\"Response status code: {response.status_code} ({response.reason})\")","repo_name":"beazt123/homemade-ai-assistant","sub_path":"lib/interpreters/RasaInterpreter.py","file_name":"RasaInterpreter.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73832139449","text":"def listarMaestro(maestros1):\r\n print (\"maestros\")\r\n con = 1\r\n for i in maestros1:\r\n datos = \"{0}. id: {1} - nombre: {2} - apellido: {3}\"\r\n print(datos.format(con,i[0],i[1],i[2]))\r\n con = con + 1\r\n print(\"\")\r\ndef listarbases(bases):\r\n print (\"bases\")\r\n con = 1\r\n for i in bases:\r\n datos = \"{0}. 
nombre: {1}\"\r\n print(datos.format(con,i[0]))\r\n con = con + 1\r\n print(\"\") \r\ndef pedirMaestros():\r\n id = int(input(\"Escriba un nuevo id: \"))\r\n nombre = input(\" Nuevo nombre: \")\r\n apellido = input(\" Nuevo apellido: \")\r\n maestro = (id,nombre,apellido)\r\n return maestro\r\n\r\ndef eliminar(maestro):\r\n listarMaestro(maestro)\r\n existeid = False\r\n id = int(input(\"ID de maestro a eliminar: \"))\r\n for i in maestro:\r\n if i[0] == id:\r\n existeid = True\r\n break\r\n if not existeid:\r\n id = \" \"\r\n return id\r\ndef actualizarMaestros(maestros):\r\n listarMaestro(maestros)\r\n existeid = False\r\n id = int(input(\"ID de maestro a editar: \"))\r\n for i in maestros:\r\n if i[0] == id:\r\n existeid = True\r\n break\r\n if existeid:\r\n nombre = input(\" Nuevo nombre: \")\r\n apellido = input(\" Nuevo apellido: \")\r\n maestros = (id,nombre,apellido)\r\n else: \r\n maestros = None\r\n return maestros\r\n \r\n","repo_name":"ElmerRCH/CRUD-PY","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"25936395667","text":"from pathlib import Path\n\nimport openpyxl.workbook.workbook as ow\nfrom openpyxl.utils.exceptions import ReadOnlyWorkbookException\nfrom openpyxl.worksheet._write_only import WriteOnlyWorksheet\n\nfrom ..worksheet.worksheet import Worksheet\nfrom pyworkkit.utils.function import Others\n\n\nclass Workbook(ow.Workbook):\n \"\"\"\n 继承了原 Workbook 的类,为了实现扩展方法\n \"\"\"\n\n def __init__(self, write_only=False, iso_dates=False,) -> None:\n super(Workbook, self).__init__(write_only, iso_dates)\n\n def print_self(self):\n print('我是自定义的哒!')\n\n def find_sheet(self,find_name:str):\n find_name = Others.str_fix(find_name)\n range_=range(len(self.sheetnames))\n for i in range_:\n re_name =Others.str_fix(self.sheetnames[i])\n if (find_name in re_name) or (re_name in find_name):\n return self.sheetnames[i]\n return find_name\n\n def load_sheet(self, sheetname: str,vague:bool=False) -> Worksheet:\n if isinstance(sheetname, str):\n if sheetname in self.sheetnames:\n print('当前表格为:'+sheetname)\n ws = self[sheetname]\n return ws\n elif vague:\n return self.load_sheet(self.find_sheet(sheetname))\n else:\n raise ValueError(\"can't find the sheet named {}\".format(sheetname))\n else:\n raise TypeError(\"sheetname must be str and not empty .\")\n\n def create_sheet(self, sheetname):\n if sheetname in self.sheetnames:\n raise ValueError(\"sheet {0} is exist.\".format(sheetname))\n ws = self.create_sheet(sheetname)\n return ws\n\n def copy_model_sheet(self, modelname, sheetname):\n if modelname in self.sheetnames:\n moudlesheet = self[modelname]\n copyws = self.copy_worksheet(moudlesheet)\n copyws.title = sheetname\n return copyws\n else:\n raise ValueError('没有名为《'+modelname+'》的模板表格')\n\n def apart_sheets_as_books(self, folder: str, pattern: str = \"{0}\", col_list=None, callback=None):\n for name in self.sheetnames:\n from_worksheet = self.load_sheet(name)\n df = from_worksheet.unmerge_cell(True).toDataframe()\n out = Workbook()\n to_worksheet = out.create_sheet(title=name)\n # 数据转移\n to_worksheet.append_df(df)\n # 样式转移\n to_worksheet.sys_style(from_worksheet)\n # 自动列宽\n to_worksheet.auto_width(col_list)\n # 额外操作,用函数传入\n if callback:\n to_worksheet = callback(to_worksheet)\n if isinstance(to_worksheet, Worksheet):\n path = Path(folder).joinpath(pattern.format(name))\n if path.exists() and path.is_dir():\n out.save(str(path))\n 
else:\n NotADirectoryError(\"folder is not a directory.\")\n else:\n tip = 'arg function func_call return not Sheet Type!'\n raise TypeError(tip)\n\n def save(self, path):\n if path == None:\n raise ValueError(\"path can not be None.\")\n\n print('<'+path+'>工作簿正在保存')\n if 'Sheet' in self.sheetnames:\n self.remove(self['Sheet'])\n super().save(path)\n self.close()\n print('<'+path+'>工作簿保存成功')\n\n # 重写此方法,用于使用自定义的 Worksheet\n def create_sheet(self, title=None, index=None):\n \"\"\"Create a worksheet (at an optional index).\n\n :param title: optional title of the sheet\n :type title: str\n :param index: optional position at which the sheet will be inserted\n :type index: int\n\n \"\"\"\n if self.read_only:\n raise ReadOnlyWorkbookException(\n 'Cannot create new sheet in a read-only workbook')\n\n if self.write_only:\n new_ws = WriteOnlyWorksheet(parent=self, title=title)\n else:\n new_ws = Worksheet(parent=self, title=title)\n\n self._add_sheet(sheet=new_ws, index=index)\n return new_ws\n","repo_name":"yuyuko-C/pyworkkit","sub_path":"on_excel/workbook/workbook.py","file_name":"workbook.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"73779038007","text":"from dados_para_o_map import produtos, pessoas, lista\nfrom functools import reduce\n\n# recude -> (acumulador, item: acumulador + item, lista, valor inicial do acumulador)\n\nsoma_lista = reduce(lambda total, preco_produto: total + preco_produto, lista, 0)\nsoma_precos = reduce(lambda acumulador, item: acumulador + item['preco'], produtos, 0)\nsoma_idades = reduce(lambda acumula_idade, pessoa: acumula_idade + pessoa['idade'], pessoas, 0)\nprint(soma_lista)\nprint(soma_precos)\nprint(soma_idades / len(pessoas))\n","repo_name":"JerberthRocha/curso-python3","sub_path":"Programacao_procedural/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"36031352303","text":"import random\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom faker import Faker\nfrom todo_app.models import Task, Tag\n\n\nclass Command(BaseCommand):\n help = 'Generates fake data for the Todo List app.'\n\n TAGS = ['Work', 'Personal', 'Shopping', 'Health', 'Fitness']\n STATUSES = ['OPEN', 'WORKING', 'DONE', 'OVERDUE']\n\n def add_arguments(self, parser):\n parser.add_argument('count', type=int, help='Number of fake tasks to create')\n\n def handle(self, *args, **options):\n fake = Faker()\n count = options['count']\n for _ in range(count):\n title = fake.sentence(nb_words=4)\n description = fake.paragraph(nb_sentences=2)\n due_date = fake.date_between(start_date='-30d', end_date='+30d')\n status = random.choice(self.STATUSES)\n task = Task.objects.create(title=title, description=description, due_date=due_date, status=status)\n self.assign_random_tags(task)\n\n def assign_random_tags(self, task):\n tags = random.sample(self.TAGS, random.randint(1, len(self.TAGS)))\n for tag_name in tags:\n tag, _ = Tag.objects.get_or_create(name=tag_name)\n task.tags.add(tag)\n task.save()\n\n\n","repo_name":"Parassirohi/Todo-Lists","sub_path":"todo_app/management/commands/dummydata.py","file_name":"dummydata.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8969911993","text":"import torch.nn as nn\n\nclass 
Encoder(nn.Module) :\n def __init__(self):\n super(Encoder, self).__init__()\n\n # input : (?, 1, 256, 256)\n self.inc = DoubleConv(1, 64)\n self.down1 = Down(64, 128)\n self.down2 = Down(128, 256)\n self.down3 = Down(256, 512)\n\n def forward(self, x):\n out = self.inc(x)\n out = self.down1(out)\n out = self.down2(out)\n out = self.down3(out)\n\n return out\n\nclass Down(nn.Module) :\n def __init__(self, in_channels, out_channels):\n super(Down, self).__init__()\n\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels)\n )\n\n def forward(self, x):\n out = self.maxpool_conv(x)\n\n return out\n\nclass DoubleConv(nn.Module) :\n def __init__(self, in_channels, out_channels, mid_channels=None):\n super(DoubleConv, self).__init__()\n\n if not mid_channels : mid_channels = out_channels\n\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n nn.BatchNorm2d(mid_channels), nn.ReLU(inplace=True),\n nn.Conv2d(mid_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\n nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n out = self.double_conv(x)\n\n return out\n\nclass channel_reduction(nn.Module) :\n def __init__(self, ratio=2):\n super(channel_reduction, self).__init__()\n self.channel_reduction = nn.Conv2d(512, int(512//ratio), kernel_size=(1, 1))\n\n def forward(self, x):\n x = self.channel_reduction(x)\n\n return x","repo_name":"ICCV2023FSDA/FSDA_ICCV2023","sub_path":"FSDA/Extract/ExtractParts.py","file_name":"ExtractParts.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"27828353320","text":"import random\n\nimport matplotlib.pyplot as plt\nimport torch\n\nplt.switch_backend('agg')\nimport matplotlib.ticker as ticker\n\nfrom io import open\nimport unicodedata\nimport re\nimport time\nimport math\n\n# 用于建立字符的索引\nSOS_token = 0\nEOS_token = 1\n\n\nclass Lang:\n def __init__(self, name):\n self.name = name\n self.word2index = {}\n self.word2count = {}\n self.index2word = {0: \"SOS\", 1: \"EOS\"}\n self.n_words = 2 # Count SOS and EOS\n\n def addSentence(self, sentence):\n for word in sentence.split(' '):\n self.addWord(word)\n\n def addWord(self, word):\n if word not in self.word2index:\n self.word2index[word] = self.n_words\n self.word2count[word] = 1\n self.index2word[self.n_words] = word\n self.n_words += 1\n else:\n self.word2count[word] += 1\n\n\n# 定义辅助函数\n# Turn a Unicode string to plain ASCII, thanks to\n# https://stackoverflow.com/a/518232/2809427\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\n\n# Lowercase, trim, and remove non-letter characters\ndef normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\" \\1\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n return s\n\n\ndef readLangs(lang1, lang2, reverse=False):\n print(\"Reading lines...\")\n\n # Read the file and split into lines\n lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8'). 
\\\n read().strip().split('\\n')\n\n # Split every line into pairs and normalize\n pairs = [[normalizeString(s) for s in l.split('\\t')] for l in lines]\n\n # Reverse pairs, make Lang instances\n if reverse:\n pairs = [list(reversed(p)) for p in pairs]\n input_lang = Lang(lang2)\n output_lang = Lang(lang1)\n else:\n input_lang = Lang(lang1)\n output_lang = Lang(lang2)\n\n return input_lang, output_lang, pairs\n\n\n# 为了简单起见,只使用一部分的数据\nMAX_LENGTH = 10\n\neng_prefixes = (\n \"i am \", \"i m \",\n \"he is\", \"he s \",\n \"she is\", \"she s \",\n \"you are\", \"you re \",\n \"we are\", \"we re \",\n \"they are\", \"they re \"\n)\n\n\ndef filterPair(p):\n return len(p[0].split(' ')) < MAX_LENGTH and \\\n len(p[1].split(' ')) < MAX_LENGTH and \\\n p[1].startswith(eng_prefixes)\n\n\ndef filterPairs(pairs):\n return [pair for pair in pairs if filterPair(pair)]\n\n\ndef prepareData(lang1, lang2, reverse=False):\n input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)\n print(\"Read %s sentence pairs\" % len(pairs))\n pairs = filterPairs(pairs)\n print(\"Trimmed to %s sentence pairs\" % len(pairs))\n print(\"Counting words...\")\n for pair in pairs:\n input_lang.addSentence(pair[0])\n output_lang.addSentence(pair[1])\n print(\"Counted words:\")\n print(input_lang.name, input_lang.n_words)\n print(output_lang.name, output_lang.n_words)\n return input_lang, output_lang, pairs\n\n\ndef indexesFromSentence(lang, sentence):\n return [lang.word2index[word] for word in sentence.split(' ')]\n\n\ndef tensorFromSentence(lang, sentence, device):\n indexes = indexesFromSentence(lang, sentence)\n indexes.append(EOS_token)\n return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)\n\n\ndef tensorsFromPair(input_lang, output_lang, pair, device):\n input_tensor = tensorFromSentence(input_lang, pair[0], device)\n target_tensor = tensorFromSentence(output_lang, pair[1], device)\n return input_tensor, target_tensor\n\n\ndef asMinutes(s):\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef timeSince(since, percent):\n now = time.time()\n s = now - since\n es = s / (percent)\n rs = es - s\n return '%s (- %s)' % (asMinutes(s), asMinutes(rs))\n\n\ndef showPlot(points):\n plt.figure()\n fig, ax = plt.subplots()\n # this locator puts ticks at regular intervals\n loc = ticker.MultipleLocator(base=0.2)\n ax.yaxis.set_major_locator(loc)\n plt.plot(points)\n\n\n","repo_name":"Sanster/notes","sub_path":"deep_learning/pytorch/seq2seq_attn_translation/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"} +{"seq_id":"71052215289","text":"import requests as r, os\nfrom bs4 import BeautifulSoup as par\nclear = lambda : os.system('clear')\n\ndef main():\n\tclear()\n\tprint('''\\x1b[1;97m=============================================\n\\x1b[1;91m * \\x1b[1;97mAuthor : \\x1b[1;92mRizky\n\\x1b[1;91m * \\x1b[1;97mSupport: \\x1b[1;92mAprilia\n\\x1b[1;91m * \\x1b[1;97mTeam : \\x1b[1;92mXIUZCODE\n\\x1b[1;91m * \\x1b[1;97mGithub : \\x1b[1;92mhttps://github.com/hekelpro\n\\x1b[1;97m=============================================''')\n\ttry:\n\t\ta = r.get('https://litequran.net').text\n\texcept r.exceptions.ConnectionError:\n\t\texit('\\x1b[1;91m! \\x1b[1;97mTidak Ada Koneksi')\n\ta = par(a,'html.parser')\n\tprint('\\n\\x1b[1;92m1\\x1b[1;91m. 
Keluar Program')\n\t# \n\tno, ayat, bacaan, arti, link = 0, 0, 0, 0, []\n\tfor x in a.find_all('a'):\n\t\tno += 1\n\t\ttitel = x.get('title')\n\t\tif titel == None:\n\t\t\tpass\n\t\telse:\n\t\t\tprint(f'\\x1b[1;92m{str(no)}\\x1b[1;91m. \\x1b[1;97m{titel}')\n\tfor z in a.find_all('a'):\n\t\trun = z.get('href')\n\t\tlink.append(run)\n\ttry:\n\t\tpil = input('\\n\\x1b[1;91m# \\x1b[1;97mPilih:\\x1b[1;96m ')\n\t\tif pil =='1':\n\t\t\texit('\\x1b[1;91m! \\x1b[1;97mProgram Berakhir')\n\t\telse:\n\t\t\tpil = int(pil) - 1\n\t\t\tlanjut = r.get(str(link[pil])).text\n\t\t\tscrap = par(lanjut,'html.parser')\n\t\t\tfind1 = scrap.find('h1', class_=\"page-title\").text\n\t\t\tprint(\"\\x1b[1;97m=\"*45)\n\t\t\tprint(\"\\x1b[1;92m \"+find1)\n\t\t\tprint(\"\\x1b[1;97m=\"*45)\n\t\t\tprint(\"\\x1b[1;97mTulisan Arab: \\n\")\n\t\t\tfor al in scrap.find_all('span', class_=\"ayat\"):\n\t\t\t\tayat += 1\n\t\t\t\tal = (al.text)\n\t\t\t\tprint(f'\\x1b[1;92m{str(ayat)}\\x1b[1;91m.\\x1b[1;97m {al.strip()}')\n\t\t\tprint(\"\\x1b[1;97m=\"*45)\n\t\t\tprint(\"Cara Bacanya: \\n\")\n\t\t\tfor bc in scrap.find_all('span', class_=\"bacaan\"):\n\t\t\t\tbacaan += 1\n\t\t\t\tbc = (bc.text)\n\t\t\t\tprint(f'\\x1b[1;92m{str(bacaan)}\\x1b[1;91m. \\x1b[1;97m{bc.strip()}')\n\t\t\tprint(\"\\x1b[1;97m=\"*45)\n\t\t\tprint(\"Artinya: \\n\")\n\t\t\tfor ar in scrap.find_all('span', class_=\"arti\"):\n\t\t\t\tarti += 1\n\t\t\t\tar = (ar.text)\n\t\t\t\tprint(f'\\x1b[1;92m{str(arti)}\\x1b[1;91m. \\x1b[1;97m{ar.strip()}')\n\t\t\tprint(\"=\"*45)\n\texcept ValueError:\n\t exit('\\x1b[1;91m! \\x1b[1;97mJangan Kosong')\n\texcept IndexError:\n\t exit('\\x1b[1;91m! \\x1b[1;97mIsi Yang Betul')\n\nif __name__=='__main__':\n\tmain()\n\n# > __dict__\n#print(emp_1.__dict__)## you wont get the raise amount in this. \n#print(employee.__dict__) ## class contains the raise amount attribute.\n\n##\nemployee.raise_amount =1.05\nprint(emp_1.raise_amount)\nprint(employee.raise_amount)\n\nemp_1.raise_amount =1.05# would change only for the employee1.\nprint(emp_1.__dict__)## you get the raise amount in this. 
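# --- Editor's sketch: a self-contained illustration of the lookup rules the
# comments above describe. EmployeeDemo is a hypothetical stand-in, not the
# employee class from this file. Reading an attribute on an instance falls
# back to the class; assigning on the instance creates a shadowing copy in
# the instance's own __dict__.
class EmployeeDemo:
    raise_amount = 1.04  # class attribute, shared by every instance

demo = EmployeeDemo()
print(demo.raise_amount)          # 1.04 -- resolved on the class
print(demo.__dict__)              # {}   -- nothing stored on the instance yet
demo.raise_amount = 1.05          # instance attribute now shadows the class one
print(demo.__dict__)              # {'raise_amount': 1.05}
print(EmployeeDemo.raise_amount)  # 1.04 -- the class attribute is unchanged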
\n\nprint(employee.num_emps)\n\n","repo_name":"aakash2016/python-oop","sub_path":"oops4.py","file_name":"oops4.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"39115138640","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo.addons.choreograph_project.models.project_project import (\n TERMINATED_TASK_STAGE,\n TODO_TASK_STAGE,\n WAITING_FILE_TASK_STAGE,\n WAITING_QTY_TASK_STAGE,\n BAT_CLIENT_TASK_STAGE\n)\n\n\nclass ProjectProject(models.Model):\n _inherit = 'project.project'\n\n project_template_id = fields.Many2one('project.project', 'Operation Template',\n domain=[('is_template', '=', True)], copy=False)\n\n @api.model\n def set_task_project(self):\n task_details = self.env['project.task'].get_task_list()\n\n def get_vals(_list):\n return {'task_ids': [(0, 0, task_details[item]) for item in _list]}\n\n def new_extend(source_list: list(), extend_list: list()) -> list():\n tmp = source_list.copy()\n tmp.extend(extend_list)\n return tmp\n task_list = ['project_name']\n self.env.ref('choreograph_sale_project.project_project_score_presentation').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_study').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_count').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_yield_calculation').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_matchback').write(get_vals(task_list))\n\n task_list.extend(['potential', 'delivery_study', 'fullfilment_client', 'delivery_infos'])\n self.env.ref('choreograph_sale_project.project_project_telfixebox_enrichment').write(\n get_vals(new_extend(task_list, ['campaign_counts'])))\n self.env.ref('choreograph_sale_project.project_project_extraction').write(get_vals(task_list))\n\n task_list.extend(['prefulfillment'])\n self.env.ref('choreograph_sale_project.project_project_ddn_enrichment').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_telportable_enrichment').write(\n get_vals(new_extend(task_list, ['campaign_counts'])))\n self.env.ref('choreograph_sale_project.project_project_sms_enrichment').write(\n get_vals(new_extend(task_list, ['audit', 'campaign_sms', 'info_presta', 'delivery_presta', 'campaign_counts'])))\n self.env.ref('choreograph_sale_project.project_project_email_enrichment').write(get_vals(new_extend(\n task_list, ['audit', 'campaign', 'file_bat', 'link_opt_out', 'info_presta', 'delivery_presta', 'campaign_counts'])))\n\n task_list.extend(['presentation', 'deposit_date'])\n self.env.ref('choreograph_sale_project.project_project_reactivation').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_loyalty').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_activation').write(get_vals(task_list))\n self.env.ref('choreograph_sale_project.project_project_postal_prospecting').write(get_vals(task_list))\n\n task_list.extend(['campaign_counts'])\n self.env.ref('choreograph_sale_project.project_project_postal_prospecting_telfixebox').write(\n get_vals(new_extend(task_list, ['info_presta', 'delivery_presta'])))\n self.env.ref('choreograph_sale_project.project_project_telfixebox_prospecting').write(get_vals(task_list))\n 
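        # Editor's aside (sketch, not original code): every write() in this
        # block has the same shape. get_vals(new_extend(task_list, extra))
        # expands to Odoo's one2many "create" commands, one (0, 0, vals)
        # triple per task key -- e.g. for task_list = ['project_name'] and
        # extra = ['campaign'], the written value would be:
        #   {'task_ids': [(0, 0, task_details['project_name']),
        #                 (0, 0, task_details['campaign'])]}
        # new_extend copies task_list before extending, so the shared base
        # list is not mutated between calls.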
self.env.ref('choreograph_sale_project.project_project_postal_prospecting_email').write(\n get_vals(new_extend(task_list, ['campaign', 'file_bat', 'info_presta', 'delivery_presta'])))\n self.env.ref('choreograph_sale_project.project_project_email_prospecting').write(\n get_vals(new_extend(task_list, ['campaign', 'file_bat'])))\n self.env.ref('choreograph_sale_project.project_project_postal_prospecting_sms').write(\n get_vals(new_extend(task_list, ['campaign_sms', 'info_presta', 'delivery_presta'])))\n self.env.ref('choreograph_sale_project.project_project_sms_prospecting').write(\n get_vals(new_extend(task_list, ['campaign_sms'])))\n self.env.ref('choreograph_sale_project.project_project_postal_prospecting_telportable').write(\n get_vals(new_extend(task_list, ['info_presta', 'delivery_presta'])))\n self.env.ref('choreograph_sale_project.project_project_prospection_telportable').write(get_vals(task_list))\n\n def create_operation_from_template(self):\n action = self.project_template_id.create_project_from_template(self.name)\n self.unlink()\n return action\n\n def create_project_from_template(self, name=False):\n action = super(ProjectProject, self).create_project_from_template()\n project = self.browse(action.get('res_id')).exists()\n if project.type_of_project == 'operation':\n types = self.env['project.task'].get_operation_project_task_type()\n project_stage = self.env.ref('choreograph_project.planning_project_stage_draft')\n task_stage = self.env.ref('choreograph_project.project_task_type_draft')\n project.write({\n 'stage_id': project_stage.id,\n 'type_ids': [(6, 0, types.ids)],\n 'name': name if name else project.name\n })\n project.task_ids.with_context(task_stage_init=True).write({\n 'stage_id': task_stage.id,\n })\n return {\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'res_model': 'sale.order',\n 'target': 'current',\n 'context': {\n 'create_project_from_template': True,\n 'operation_id': action.get('res_id')\n }\n }\n return action\n\n def initialize_order(self, order_id):\n self.write({\n 'sale_order_id': order_id.id,\n 'partner_id': order_id.partner_id.id,\n 'user_id': order_id.user_id.id\n })\n self.task_ids.write({\n 'sale_order_id': order_id.id,\n 'partner_id': order_id.partner_id.id,\n 'date_deadline': order_id.commitment_date\n })\n self.task_ids.filtered(lambda t: t.task_number in ['80']).write({\n 'date_deadline': order_id.commitment_date - relativedelta(days=2) if order_id.commitment_date else False,\n })\n\n def write(self, vals):\n res = super(ProjectProject, self).write(vals)\n for record in self:\n if record.type_of_project == 'operation' and \\\n vals.get('stage_id') == self.env.ref('choreograph_project.planning_project_stage_planified').id:\n record._hook_stage_planified()\n return res\n\n def _hook_stage_planified(self):\n self._update_task_stage('5', WAITING_FILE_TASK_STAGE)\n self._update_task_stage('10', WAITING_FILE_TASK_STAGE)\n self._update_task_stage('15', WAITING_FILE_TASK_STAGE)\n self._update_task_stage('20', TODO_TASK_STAGE)\n self._update_task_stage('25', TODO_TASK_STAGE)\n self._update_task_stage('35', TODO_TASK_STAGE)\n\n def _hook_task_in_stage_20_25(self):\n self.write({'stage_id': self.env.ref('choreograph_project.planning_project_stage_in_progress').id})\n\n def _hook_task_20_in_stage_80(self):\n self._update_task_stage('65', TODO_TASK_STAGE)\n\n def _hook_task_25_in_stage_80(self):\n self._update_task_stage('30', WAITING_QTY_TASK_STAGE)\n self._update_task_stage('40', TODO_TASK_STAGE)\n\n def _hook_task_30_in_stage_80(self):\n if 
self._is_task_terminated(['40']):\n self._update_task_stage('65', TODO_TASK_STAGE)\n \n def _hook_task_40_in_stage_80(self):\n if self._is_task_terminated(['30']):\n self._update_task_stage('65', TODO_TASK_STAGE)\n\n def _hook_task_65_5_15_terminated(self, except_task):\n if self._is_task_terminated(['65', '5', '15'], except_task):\n if self.task_ids.filtered(lambda task: task.task_number == '70'):\n self._update_task_stage('70', TODO_TASK_STAGE)\n else:\n self._update_task_stage('80', TODO_TASK_STAGE)\n\n def _is_task_terminated(self, task_number_list, task_number=False):\n if task_number and task_number in task_number_list:\n task_number_list.pop(task_number_list.index(task_number))\n task_ids = self.task_ids.filtered(lambda task: task.task_number in task_number_list)\n return all([task.stage_id.stage_number == TERMINATED_TASK_STAGE for task in task_ids])\n\n def _hook_task_70_in_stage_80(self):\n task_55 = self._find_task_by_task_number('55')\n task_45 = self._find_task_by_task_number('45')\n task_55_in_80 = task_55 and task_55.stage_id.stage_number == TERMINATED_TASK_STAGE\n task_45_in_80 = task_45 and task_45.stage_id.stage_number == BAT_CLIENT_TASK_STAGE\n if (task_55_in_80 or not task_55) and (task_45_in_80 or not task_45):\n self._update_task_stage('75', TODO_TASK_STAGE)\n\n def _hook_task_55_in_stage_80(self):\n task_70 = self._find_task_by_task_number('55')\n task_45 = self._find_task_by_task_number('45')\n task_45_in_80 = task_45 and task_45.stage_id.stage_number == BAT_CLIENT_TASK_STAGE\n task_70_in_80 = task_70 and task_70.stage_id.stage_number == TERMINATED_TASK_STAGE\n if (task_70_in_80 or not task_70) and (task_45_in_80 or not task_45):\n self._update_task_stage('75', TODO_TASK_STAGE)\n\n def _hook_task_75_in_stage_80(self):\n self.write({'stage_id': self.env.ref('choreograph_project.planning_project_stage_presta_delivery').id})\n\n def _hook_task_10_and_80_in_stage_80(self, task_number):\n if self._is_task_terminated(['10', '80'], task_number):\n self._update_task_stage('85', TODO_TASK_STAGE)\n\n def _hook_task_fulfillement_terminated(self):\n self.write({'stage_id': self.env.ref('choreograph_project.planning_project_stage_to_deliver').id})\n\n def _hook_task_45_in_stage_80(self):\n self._update_task_stage('90', TODO_TASK_STAGE)\n\n def _hook_task_45_in_stage_50(self):\n self.write({'stage_id': self.env.ref('choreograph_project.planning_project_stage_routing').id})\n\n def _hook_task_80_in_stage_80(self):\n self._update_task_stage('85', TODO_TASK_STAGE)\n\n def _hook_task_90_in_stage_15(self):\n self.write({'stage_id': self.env.ref('choreograph_project.planning_project_stage_extraction').id})\n\n def _hook_task_90_in_stage_80(self):\n self.write({'stage_id': self.env.ref('choreograph_project.planning_project_stage_terminated').id})\n\n def _hook_check_all_task(self, task_id):\n not_terminated = self.task_ids.filtered(\n lambda task: task.id != task_id and task.stage_id.stage_number != TERMINATED_TASK_STAGE)\n if not not_terminated:\n self.write({'stage_id': self.env.ref('choreograph_project.planning_project_stage_terminated').id})\n","repo_name":"Choreograph-Lille/OdooConex","sub_path":"choreograph-addons/choreograph_sale_project/models/project_project.py","file_name":"project_project.py","file_ext":"py","file_size_in_byte":11055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"15504290482","text":"#CRIE UM PROGRAMA QUE LEIA NOME, SEXO E IDADE DE VÁRIAS PESSOAS, GUARDANDO OS DADOS DE CADA PESSOA EM UM DICIONÁRIO 
E TODOS OS DICIONÁRIOS EMUMA LISTA. NO FINAL, MOSTRE:\n# a) QUANTAS PESSOAS FORAM CADASTRADAS\n# B) A MÉDIA DE IDADE DO GRUPO\n# C) UMA LISTA COM TODSA AS MULHERES.\n# D) UMA LISTA COM TODAS AS PESSOAS COM IDADE ACIMA DA MÉDIA\ndados = list()\nnome = dict()\nsexo = dict()\nidade = dict()\nsoma = list()\nmulheres = list()\ncont = 0\nwhile True:\n nome[\"nome\"] = str(input(\"Digite seu nome:\"))\n dados.append(nome.copy())\n cont = cont + 1\n sexo[\"sexo\"] = str(input(\"Sexo → [M / F]:\").strip().upper())\n dados.append(sexo.copy())\n if sexo[\"sexo\"] in \"F\":\n mulheres.append(nome[\"nome\"])\n while sexo[\"sexo\"] not in \"FM\":\n sexo[\"sexo\"] = str(input(\"VALOR INVÁLIDO!! Sexo → [M / F]:\").strip().upper())\n if sexo[\"sexo\"] in \"FM\":\n break\n idade[\"idade\"] = int(input(\"idade?: \"))\n soma.append(idade[\"idade\"])\n dados.append(idade.copy())\n c = str(input(\"Quer continuar? [ S / N ]: \").strip().upper())\n while c not in \"sSnN\":\n c = str(input(\"Valor inválido !! Quer continuar? [ S / N ]: \").strip().upper())\n if c == \"N\":\n break\nfor k, v in enumerate(dados):\n print(f\"{k} >>>>>>>> {v}\")\ntotal = sum(soma) / cont\n\nprint(f\"Ao todo temos {cont} pessoas cadastradas \")\nprint(f\" A média de idade dessas pessoas é de {total}\")\nprint(f\"As mulheres cadastradas foram {mulheres}\")\n\n# OUTRA FORMA DE FAZER\n\npessoa = list()\nnome = dict()\nsoma = media = 0\n\nwhile True:\n nome.clear()\n nome[\"nome\"] = str(input(\"Digite seu nome:\"))\n while True:\n nome[\"sexo\"] = str(input(\"sexo? [F / M ]: \")).upper()[0]\n if nome[\"sexo\"] in \"FM\":\n break\n print(\"Valor inválido, por favor digite 'F' ou 'M' \")\n nome[\"idade\"] = int(input(\"idade?: \"))\n soma = soma + nome[\"idade\"]\n pessoa.append(nome.copy())\n while True:\n resp = str(input(\"Quer continuar?: [S / N]\")).upper()[0]\n if resp in \"SN\":\n break\n print(\"Valor inválido! Por favor, digite [ S / N]\")\n if resp == \"N\":\n break\nmedia = soma / len(pessoa)\nprint(f\"Foram cadastras {len(pessoa)}\")\nprint(f\"A média de idade foi de {media:.2f} anos\")\nprint(\"As Mulheres cadastradas foram:\", end=\" \")\nfor p in pessoa:\n if p[\"sexo\"] in \"Ff\":\n print(f\"{p['nome']},\", end=\" \")\nprint()\nprint(\"Listas das pessoas acim da média:\")\nfor p in pessoa:\n if p[\"idade\"] >= media:\n print(\" \", end=\" \")\n for k, v in p.items():\n print(f\"{k} = {v}; \", end=\" \")\n print()\nprint(\"<<<< FINALIZADO >>>>>\")\n\n\n\n\n\n\n# PROFESSOR FEZ:\ngalera = list()\npessoa = dict()\nsoma = media = 0\nwhile True:\n pessoa.clear()\n pessoa[\"nome\"] = str(input(\"Nome: \"))\n while True:\n pessoa[\"sexo\"] = str(input(\"Sexo: [M/F] \")).upper()[0]\n if pessoa[\"sexo\"] in \"MF\":\n break\n print(\"ERRO! Por favor, digite apenas M ou F. \")\n pessoa[\"idade\"] = int(input(\"Idade: \"))\n soma = soma + pessoa[\"idade\"]\n galera.append(pessoa.copy())\n while True:\n resp = str(input(\"Quer Continuar? [S/N]\")).upper()[0]\n if resp in \"SN\":\n break\n print(\"ERRO!, Responda apenas S ou N. \")\n if resp == \"N\":\n break\nprint(\"-=-\" * 30)\nprint(f\"Ao todo temos {len(galera)} pessoas cadastradas. 
\")\nmedia = soma / len(galera)\nprint(f\" A Média de idade é de {media:5.2f} anos.\")\nprint(f\"As mulheres cadastradas foram\", end=\"\")\nfor p in galera:\n if p[\"sexo\"] in \"Ff\":\n print(f\"{p['nome']}\", end=\"\")\nprint()\nprint(f\"Lista das pessoas que estão acima da média: \")\nfor p in galera:\n if p[\"idade\"] >= media:\n print(\" \", end=\"\")\n for k, v in p.items():\n print(f\"{k} = {v}\", end=\"\")\n print()\nprint(\"<< ENCERRADO >>\")\n\n","repo_name":"AlanLima100/exercicios_em_python","sub_path":"ex094.py","file_name":"ex094.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"7573282777","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input().strip())\r\n\r\narr = []\r\n\r\nfor _ in range(n):\r\n arr.append(int(input().strip()))\r\n\r\nplus = []\r\nminus = []\r\nzero = 0\r\nfor x in arr:\r\n if x > 0:\r\n plus.append(x)\r\n elif x < 0:\r\n minus.append(x)\r\n else:\r\n zero = 1\r\n# 포인트는 위치에 상관없이 묶는다는것이다\r\n# 0 은 무시하자\r\n# 그리디하게 곱이 높은애들만 묶자\r\nplus.sort()\r\nminus.sort(reverse = True)\r\n\r\nanswer = 0\r\n\r\nwhile len(plus) >= 2:\r\n x,y = plus.pop(),plus.pop()\r\n answer += max(x * y,x + y)\r\nif plus:\r\n answer += plus.pop()\r\nwhile len(minus) >= 2:\r\n x,y = minus.pop(),minus.pop()\r\n answer += x * y\r\nif minus:\r\n if not zero :\r\n answer += minus.pop()\r\nprint(answer)\r\n\r\n","repo_name":"wjs2063/BaekJoon","sub_path":"백준/Gold/1744. 수 묶기/수 묶기.py","file_name":"수 묶기.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"5875863234","text":"import cohere\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"prompt\", help=\"The prompt to send to the API\")\n#parser.add_argument(\"file_name\", help=\"Name of the file to save Python script\")\nargs = parser.parse_args()\n\nco = cohere.Client('Zmz7oSN242GFTiGisumtHI1YJue4RpbiJMgyXc4T')\n\nresponse = co.generate(\n prompt = f\"Write python script to {args.prompt}. 
Provide only code, no text\",\n)\n\nprint (response)\n","repo_name":"geordie12311/python_scripts","sub_path":"python-chatgpt/chat-test.py","file_name":"chat-test.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29655032717","text":"#法1:非递归\nclass Solution():\n def letterCombinatins(self,digits):\n if not digits:\n return []\n\n phoneNo = {\n '2':'abc',\n '3':'def',\n '4':'ghi',\n '5':'jkl',\n '6':'mno',\n '7':'pqrs',\n '8':'tuv',\n '9':'wxyz',\n }\n\n arr = [i for i in phoneNo[digits[0]]]\n\n for i in digits[1:]:\n arr = [u+v for u in arr for v in phoneNo[i]]\n print(arr)\n\n\nif __name__ == '__main__':\n Solution().letterCombinatins('23')\n\n#法2:递归\nclass Solution(object):\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n # 创建字母对应的字符列表的字典\n dic = {2: ['a', 'b', 'c'],\n 3: ['d', 'e', 'f'],\n 4: ['g', 'h', 'i'],\n 5: ['j', 'k', 'l'],\n 6: ['m', 'n', 'o'],\n 7: ['p', 'q', 'r', 's'],\n 8: ['t', 'u', 'v'],\n 9: ['w', 'x', 'y', 'z'],\n }\n # 存储结果的数组\n ret_str = []\n if len(digits) == 0: return []\n # 递归出口,当递归到最后一个数的时候result拿到结果进行for循环遍历\n if len(digits) == 1:\n return dic[int(digits[0])]\n # 递归调用\n result = self.letterCombinations(digits[1:])\n # result是一个数组列表,遍历后字符串操作,加入列表\n for r in result:\n for j in dic[int(digits[0])]:\n ret_str.append(j + r)\n return ret_str\n\n\n\n","repo_name":"Janice18/xiyu-NLPTrainee","sub_path":"python/Day3/LeetCode17 电话号码的字母组合.py","file_name":"LeetCode17 电话号码的字母组合.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"14814378045","text":"# test_database.py - tests for database functions\n\n# from sql_database import get_items, add_item, update_item, delete_item\nfrom dataset_database import get_items, add_item, update_item, delete_item\nimport time\n\n# Test Cases\ndef random_string():\n return str(time.time())\n\ndef test_get_items():\n print(\"testing get items\")\n items = get_items()\n assert type(items) is list\n assert len(items) > 0\n assert type(items[0]) is dict\n assert 'id' in items[0].keys()\n assert 'description' in items[0].keys()\n assert type(items[0]['id']) is int\n assert type(items[0]['description']) is str\n pass\n\ndef test_add_item():\n print(\"testing add items\")\n desc = random_string()\n add_item(desc)\n items = get_items()\n item = items[-1]\n assert desc == item[\"description\"]\n pass\n\ndef test_delete_item():\n print(\"testing delete items\")\n desc = random_string()\n add_item(desc)\n items = get_items()\n item = items[-1]\n id = item[\"id\"]\n delete_item(id)\n new_items = get_items()\n assert len(items) > len(new_items)\n for i in new_items:\n assert desc != i[\"description\"]\n pass\n\n\ndef test_update_item():\n print(\"testing update item\")\n desc = random_string()\n add_item(desc)\n items = get_items()\n item = items[-1]\n id, desc = item[\"id\"], item[\"description\"]\n new_desc = desc.replace(\"1\", \"9\").replace(\".\", \",\")\n update_item(id, new_desc)\n new_items = get_items()\n assert len(items) == len(new_items)\n new_found = False\n for i in new_items:\n if i[\"id\"] == int(id):\n assert new_desc == i[\"description\"]\n new_found = True\n assert i[\"description\"] != desc\n assert new_found\n\n\nif __name__ == \"__main__\":\n test_get_items()\n test_add_item()\n test_delete_item()\n test_update_item()\n 
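    # Editor's sketch (not part of the original suite): one extra end-to-end
    # sanity check reusing the same get_items/add_item/delete_item contract
    # the tests above exercise -- adding then deleting an item should leave
    # the item count unchanged.
    before = len(get_items())
    add_item(random_string())
    delete_item(get_items()[-1]["id"])
    assert len(get_items()) == before, "add followed by delete changed the count"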
print(\"done\")\n\n\n\n\n\n\n","repo_name":"saitejadasari/bottle_webapp","sub_path":"topic-03-dataset-db/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"74213118648","text":"'''\r\nMaximal Square from Coderbyte\r\nJanuary 2021 Jakub Kazimierski\r\n'''\r\n\r\ndef MaximalSquare(strArr):\r\n '''\r\n Have the function MaximalSquare(strArr) \r\n take the strArr parameter being passed \r\n which will be a 2D matrix of 0 and 1's, \r\n and determine the area of the largest \r\n square submatrix that contains all 1's. \r\n A square submatrix is one of equal width \r\n and height, and your program should return \r\n the area of the largest submatrix that contains \r\n only 1's. \r\n \r\n For example: if strArr is \r\n [\"10100\", \"10111\", \"11111\", \"10010\"] \r\n then this looks like the following matrix:\r\n\r\n 1 0 1 0 0\r\n 1 0 1 1 1\r\n 1 1 1 1 1\r\n 1 0 0 1 0\r\n\r\n For the input above, you can see the bolded 1's \r\n create the largest square submatrix of size 2x2, \r\n so your program should return the area which is 4.\r\n You can assume the input will not be empty.\r\n '''\r\n\r\n matrix = []\r\n for row in strArr:\r\n matrix.append(list(row))\r\n\r\n max_area = 0\r\n area_border = 1\r\n # below increments area border at each iteration\r\n for length in range(len(matrix)):\r\n # from each point from which square can fit into matrix \r\n # below cheks possible square created from 1's \r\n for row_id in range(len(matrix) - length):\r\n for col_id in range(len(matrix[row_id]) - length):\r\n is_square = True \r\n\r\n # below checks square area of given border length\r\n for right_id in range(0, area_border):\r\n for down_id in range(0, area_border):\r\n if is_square:\r\n if area_border == 1:\r\n if matrix[row_id + down_id][col_id + right_id] == '1':\r\n max_area = 1\r\n else:\r\n if matrix[row_id + down_id][col_id + right_id] == '0':\r\n is_square = False\r\n if is_square:\r\n max_area = area_border**2 \r\n\r\n area_border += 1\r\n\r\n return max_area","repo_name":"JakubKazimierski/PythonPortfolio","sub_path":"Coderbyte_algorithms/Hard/MaximalSquare/MaximalSquare.py","file_name":"MaximalSquare.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"} +{"seq_id":"39240288264","text":"\"\"\"\nSolution of application 3\n\"\"\"\n\n# api\nfrom alg_cluster import Cluster as cls\n\n# solution\nfrom projectThree import slow_closest_pairs as slow\nfrom projectThree import fast_closest_pair as fast\nfrom projectThree import hierarchical_clustering as hierarchical\nfrom projectThree import kmeans_clustering as kmeans\n\n# general\nfrom random import randint as rand\nfrom time import clock as timer\nimport matplotlib.pyplot as plt\n\n\n# Question 1\ndef gen_random_clusters(num_clusters):\n clist = []\n for _dummy_idx in range(num_clusters):\n clist.append(cls(set([]), rand(-1, 1), rand(-1, 1), rand(0, 1), rand(-1, 1)))\n\n return clist\n\ndef simulator():\n res_slow = []\n res_fast = []\n\n clusters = []\n for size in range(2, 201):\n clusters.append(gen_random_clusters(size))\n\n # slow\n for clist in clusters:\n slow_start = timer()\n slow(clist)\n slow_end = timer()\n res_slow.append(slow_end - slow_start)\n\n # fast\n for clist in clusters:\n fast_start = timer()\n fast(clist)\n fast_end = timer()\n res_fast.append(fast_end - fast_start)\n\n\n x_axis = [num for 
num in range(2, 201)]\n plt.title('Comparison of efficiency in desktop python environment')\n plt.xlabel('size of random clusters')\n plt.ylabel('running time (seconds)')\n plt.plot(x_axis, res_slow, '-b', label='slow_closest_pair', linewidth=2)\n plt.plot(x_axis, res_fast, '-r', label='fast_closest_pair', linewidth=2)\n plt.legend(loc='upper left')\n plt.show()\n\n\nif __name__ == '__main__':\n simulator()\n","repo_name":"amazonbuy201508/algorithmic-thinking","sub_path":"src/moduleSix.py","file_name":"moduleSix.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"} +{"seq_id":"33622734297","text":"import tkinter as tk\r\nfrom tkinter import ttk, messagebox\r\nfrom threading import Thread, Semaphore\r\nimport time\r\n\r\n# Semáforos\r\nmutex = Semaphore(1)\r\nwrite = Semaphore(1)\r\n\r\n# Contador de lectores\r\nreadcount = 0\r\n\r\nclass ProcessWindow(tk.Toplevel):\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n self.master = master\r\n self.title(\"Proceso\")\r\n self.create_widgets()\r\n\r\n def create_widgets(self):\r\n self.text = tk.Text(self)\r\n self.text.pack()\r\n\r\n frame = ttk.Frame(self)\r\n frame.pack()\r\n\r\n self.read_button = ttk.Button(frame, text=\"Leer\", command=self.read)\r\n self.read_button.pack(side=\"left\")\r\n\r\n self.edit_button = ttk.Button(frame, text=\"Editar\", command=self.edit)\r\n self.edit_button.pack(side=\"left\")\r\n\r\n self.save_button = ttk.Button(frame, text=\"Guardar\", command=self.save)\r\n self.save_button.pack(side=\"left\")\r\n\r\n def read(self):\r\n def reader():\r\n global readcount\r\n mutex.acquire()\r\n readcount += 1\r\n if readcount == 1:\r\n if not write.acquire(blocking=False):\r\n messagebox.showwarning(\"Advertencia\", \"Un escritor está escribiendo en el archivo.\")\r\n readcount -= 1\r\n mutex.release()\r\n return\r\n mutex.release()\r\n\r\n # Leer archivo\r\n with open('file.txt', 'r') as f:\r\n content = f.read()\r\n\r\n # Simular lectura en tiempo real\r\n for word in content.split():\r\n self.text.insert('end', word + ' ')\r\n self.text.update()\r\n time.sleep(0.5)\r\n\r\n mutex.acquire()\r\n readcount -= 1\r\n if readcount == 0:\r\n write.release()\r\n mutex.release()\r\n\r\n Thread(target=reader).start()\r\n\r\n def edit(self):\r\n self.text.delete('1.0', 'end')\r\n\r\n def save(self):\r\n def writer():\r\n if not write.acquire(blocking=False):\r\n messagebox.showwarning(\"Advertencia\", \"Otro escritor está escribiendo en el archivo.\")\r\n return\r\n\r\n # Guardar archivo\r\n with open('file.txt', 'w') as f:\r\n f.write(self.text.get('1.0', 'end'))\r\n\r\n write.release()\r\n\r\n Thread(target=writer).start()\r\n\r\nroot = tk.Tk()\r\nroot.withdraw()\r\n\r\nfor _ in range(3):\r\n ProcessWindow(root)\r\n\r\nroot.mainloop()\r\n","repo_name":"GALL1T0/Lector-Escritor1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"72567465209","text":"from lxml import etree\nimport time, os, sys, re\n\nclass XMLEntity(object):\n def xml(self):\n # returns an xml representation of object\n raise NotImplementedError\n\n _strcache = None\n def __str__(self):\n if not self._strcache:\n self._strcache = render_xml(self.xml(),include_header=True)\n return self._strcache\n \n def __len__(self):\n return len(str(self))\n\nclass XMLEntityList(object):\n def xml_list(self):\n raise 
NotImplementedError\n\ndef render_xml(element, include_header=True):\n s = etree.tostring(element)\n if include_header:\n return '\\n' + s\n else:\n return s\n\ndef SubElement(*args, **keywords):\n return _element_helper(2, etree.SubElement, args, keywords)\n\ndef Element(*args, **keywords):\n return _element_helper(1, etree.Element, args, keywords)\n\ndef _element_helper(n_expected_args, func, args, keywords):\n \n if len(args) > n_expected_args:\n text = args[n_expected_args]\n args = args[:n_expected_args] + args[n_expected_args+1:]\n else:\n text = None\n\n try:\n children = keywords.pop('children')\n except:\n children = []\n try:\n extra_elements = keywords.pop('extra_elements')\n except:\n extra_elements = {}\n\n el = func(*args, **keywords)\n \n for k,v in extra_elements.items():\n el.set(k,v)\n\n if text != None:\n el.text = str(text)\n\n for c in _expand_children(children):\n el.append(c)\n return el\n\n\ndef _expand_children(seq):\n for c in seq:\n if isinstance(c, XMLEntity):\n yield c.xml()\n elif isinstance(c, XMLEntityList):\n for e in _expand_children(c.xml_list()):\n yield e\n else:\n yield c\n","repo_name":"hussam/racs","sub_path":"python/racs/xmlutil.py","file_name":"xmlutil.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"8434962530","text":"from django.db import models\nfrom devices.models import Device\nfrom infraction_types.models import InfractionType\n\nclass PredictionModel(models.Model):\n class TrainingState(models.TextChoices):\n INITIALIZED = 'init', 'Initialized'\n FIRST_COMMITTING_INFRACTION = 'committing_1', 'First committing infraction'\n FIRST_DONE_COMMITTING_INFRACTION = 'done_committing_1', 'Done committing first infraction'\n FIRST_NOT_COMMITTING_INFRACTION = 'not_committing_1', 'First not committing infraction'\n FIRST_DONE_NOT_COMMITTING_INFRACTION = 'done_not_committing_1', 'First done not commimtting infraction'\n SECOND_COMMITTING_INFRACTION = 'committing_2', 'Second committing infraction'\n SECOND_DONE_COMMITTING_INFRACTION = 'done_committing_2', 'Done committing second infraction'\n SECOND_NOT_COMMITTING_INFRACTION = 'not_committing_2', 'Second not committing infraction'\n SECOND_DONE_NOT_COMMITTING_INFRACTION = 'done_not_committing_2', 'Done not commimtting second infraction'\n TRAINED = 'trained', 'Trained'\n\n device = models.ForeignKey(Device, on_delete=models.CASCADE)\n infraction_type = models.ForeignKey(InfractionType, on_delete=models.CASCADE)\n is_predicting = models.BooleanField(default=False)\n training_state = models.CharField(\n max_length=21,\n choices=TrainingState.choices,\n default=TrainingState.INITIALIZED,\n )\n stream_delay = models.IntegerField(default=0)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return f'Detecting {self.infraction_type.infraction_type_name} on device #{self.device.serial_number}'\n","repo_name":"SafetyVision/safety-vision-platform","sub_path":"prediction_models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"6184129244","text":"k , N = map(int, input().split(' '))\n# x 값을 정해놓고 점점 늘려가면서 abcd 값을 더해 N에 가까워지면 출력\narr = []\nfor i in range(k):\n arr.append(int(input()))\nstart = 1\nx = 0\nmin = 0\nfor i in range(k):\n if arr[i] > min:\n min = arr[i]\nend = min\nsum = 0\nwhile start <= 
end:\n mid = (start + end) // 2\n \n for i in range(k):\n if arr[i] >= mid:\n sum += arr[i] // mid \n if sum >= N:\n start = mid +1\n else:\n end = mid -1\n sum = 0\nprint (end)","repo_name":"shgusgh12/Python-practice","sub_path":"github/baekjoon/1654.py","file_name":"1654.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73850953216","text":"\"\"\"POCA: CALC\n The module with the functions to perform calculations on data sets in .../portland_crime/data_sets\n\n These will be imported to main.py in order to provide feedback to the user.\n\"\"\"\n\nfrom control_flow import pause_clear\nimport config\nimport main\nfrom collections import (Counter, defaultdict)\n\n\ndef data_max(data, check_type):\n \"\"\"\n Returns the max() of the search terms for each year of selected data.\n \"\"\"\n\n data_dict = defaultdict(list)\n\n for eachkey in data:\n for incident in data[eachkey]:\n data_dict[eachkey].append(getattr(incident, check_type))\n\n c = Counter(data_dict[eachkey])\n print(\"The most common {} in {} was {} with {} incidents.\".format(check_type, eachkey, c.most_common()[0][0], c.most_common()[0][1]))\n\n pause_clear()\n\n return_dict[check_type]()\n\n\ndef data_min(data, check_type):\n \"\"\"\n Returns the max() of the search terms for each year of selected data.\n \"\"\"\n\n data_dict = defaultdict(list)\n\n for eachkey in data:\n for incident in data[eachkey]:\n data_dict[eachkey].append(getattr(incident, check_type))\n\n c = Counter(data_dict[eachkey])\n print(\"The least common {} in {} was {} with {} incidents.\".format(check_type, eachkey, c.most_common()[-1][0], c.most_common()[-1][1]))\n\n pause_clear()\n\n return_dict[check_type]()\n\n\ndef data_average(data, check_type):\n \"\"\"\n Returns the average of data for the selected check_type over the selected data years.\n \"\"\"\n\n data_dict = defaultdict(list)\n\n for eachkey in data:\n for incident in data[eachkey]:\n data_dict[eachkey].append(getattr(incident, check_type))\n\n c = Counter(data_dict[eachkey])\n print(\"The common {} in {} was {} with {} incidents.\".format(check_type, eachkey, c.most_common()[-1][0],\n c.most_common()[-1][1]))\n # The average number of (larceny) incidents across the year(s) (2000, 2001, 2002) was (1200).\n pause_clear()\n\n return_dict[check_type]()\n\n\ndef by_date():\n \"\"\"\n Menu to calculate certain information oriented by date.\n \"\"\"\n\n calc_functions = {1: data_min, 2: data_max, 3: data_average}\n\n calc_choice = calc_options_menu()\n\n if calc_choice == 4:\n main.main_menu()\n else:\n calc_functions[calc_choice](config.DATA, 'ReportDate')\n\n\ndef by_time():\n \"\"\"\n Menu to calculate certain information oriented by time of day.\n \"\"\"\n\n # for dataset_key in data.keys():\n # data = sorted(data[dataset_key], key=sort_by_offense_helper)\n\n calc_functions = {1: data_min, 2: data_max, 3: data_average}\n\n calc_choice = calc_options_menu()\n\n if calc_choice == 4:\n main.main_menu()\n else:\n calc_functions[calc_choice](config.DATA, 'ReportTime')\n\n\ndef by_offense():\n \"\"\"\n Menu to calculate certain information oriented by offense type.\n \"\"\"\n\n calc_functions = {1: data_min, 2: data_max, 3: data_average}\n\n calc_choice = calc_options_menu()\n\n if calc_choice == 4:\n main.main_menu()\n else:\n calc_functions[calc_choice](config.DATA, 'MajorOffenseType')\n\n\ndef by_address():\n \"\"\"\n Menu to calculate certain information oriented by address.\n \"\"\"\n\n 
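    # Editor's aside (hypothetical refactor sketch, not part of the module):
    # each by_* function repeats the same three steps -- build the dispatch
    # table, show the min/max/average menu, then route the choice. A single
    # parametrised helper would collapse them:
    #   def by_field(field):
    #       calc_functions = {1: data_min, 2: data_max, 3: data_average}
    #       calc_choice = calc_options_menu()
    #       if calc_choice == 4:
    #           main.main_menu()
    #       else:
    #           calc_functions[calc_choice](config.DATA, field)
    # e.g. by_address would reduce to by_field('Address').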
calc_functions = {1: data_min, 2: data_max, 3: data_average}\n\n calc_choice = calc_options_menu()\n\n if calc_choice == 4:\n main.main_menu()\n else:\n calc_functions[calc_choice](config.DATA, 'Address')\n\n\ndef by_neighborhood():\n \"\"\"\n Menu to calculate certain information oriented by neighborhood.\n \"\"\"\n\n calc_functions = {1: data_min, 2: data_max, 3: data_average}\n\n calc_choice = calc_options_menu()\n\n if calc_choice == 4:\n main.main_menu()\n else:\n calc_functions[calc_choice](config.DATA, 'Neighborhood')\n\n\ndef by_precinct():\n \"\"\"\n Menu to calculate certain information oriented by police precinct and district.\n \"\"\"\n\n pass\n\n\ndef calc_options_menu():\n \"\"\"\n Prints a menu for a variety of calculation options and returns the choice of the option.\n \"\"\"\n\n\n calc_options = {1: 'Min', 2: 'Max', 3: 'Average', 4: 'Back'}\n print()\n for copt_key, copt_value in calc_options.items():\n print(\"{}: {}\".format(copt_key, copt_value))\n\n print(\"Which one do you want?\")\n try:\n copt_choice = int(input(\">>> \"))\n if copt_choice in calc_options.keys():\n return copt_choice\n else:\n print(\"Invalid choice. Please enter the number next to the selection you want.\")\n calc_options_menu()\n return int(copt_choice)\n except ValueError:\n print(\"Invalid choice. Please enter the number next to the selection you want.\")\n calc_options_menu()\n finally:\n return int(copt_choice)\n\n\nreturn_dict = {'ReportDate': by_date, 'ReportTime': by_time, 'MajorOffenseType': by_offense,\n 'Address': by_address, 'Neighborhood': by_neighborhood}\n\n# TODO: Make functions to perform calculations on data.\n","repo_name":"awarnes/portland_crime","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":5052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"29297440522","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('clubs', '0048_club_logo'),\n ('activities', '0031_remove_invitation_registered_students'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='invitation',\n name='club',\n field=models.ForeignKey(blank=True, to='clubs.Club', null=True),\n ),\n ]\n","repo_name":"enjaz/enjaz","sub_path":"activities/migrations/0032_invitation_club.py","file_name":"0032_invitation_club.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"2727643328","text":"#!/usr/bin/python3\n\nfrom linkedlist import Linkedlist\n\ndef binary_search(list,target):\n find = 0\n target_not_found = 0\n\n while find == 0 and target_not_found == 0:\n size = list.size()\n if size == 1:\n if list.head.data == target:\n find = 1\n else:\n target_not_found = 1\n mid = size // 2\n mid_node = list.find_node(mid)\n if mid_node.data == target:\n find = 1\n else:\n if mid_node.data > target:\n end = mid - 1\n last_node = list.find_node(end)\n last_node.next_node = None\n\n else:\n first = mid + 1\n first_node = list.find_node(first)\n if first_node != None:\n list.head = first_node\n else:\n target_not_found = 1\n\n if target_not_found == 1:\n find = 0\n if find == 0:\n find = False\n else:\n find = True \n\n return find\n \n\n\nl = Linkedlist()\nl.add(180)\nl.add(100)\nl.add(74)\nl.add(55)\nl.add(30)\nl.add(1)\nl.add(0)\nresult = binary_search(l,30)\nprint(result)\n 
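# Editor's sketch (extra probes, not in the original script): binary_search
# narrows the range by mutating the list itself -- it re-points head and
# severs next_node links -- so each query below gets a freshly built list.
# One expected hit and one expected miss:
hit_list = Linkedlist()
for value in (180, 100, 74, 55, 30, 1, 0):
    hit_list.add(value)
print(binary_search(hit_list, 74))   # expected: True

miss_list = Linkedlist()
for value in (180, 100, 74, 55, 30, 1, 0):
    miss_list.add(value)
print(binary_search(miss_list, 42))  # expected: False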
\n","repo_name":"automationlearner17/algorithm-and-data-structure","sub_path":"data_structures/linked_list_binary_search.py","file_name":"linked_list_binary_search.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21931781885","text":"import pygame\nimport random\nimport math\n\nSCREEN_DIM = (800, 600)\n\n\nclass Vec2d:\n\n def __init__(self, pair, y=None):\n if y is None:\n self.x = pair[0]\n self.y = pair[1]\n else:\n self.x = pair\n self.y = y\n\n def __add__(self, other):\n return Vec2d(self.x + other.x, self.y + other.y)\n\n def __sub__(self, other):\n return Vec2d(self.x - other.x, self.y - other.y)\n\n def __mul__(self, other):\n return Vec2d(self.x * other, self.y * other)\n\n def __len__(self):\n return math.sqrt(self.x ** 2 + self.y ** 2)\n\n def int_pair(self):\n return (int(self.x), int(self.y))\n\n\nclass Polyline:\n\n def __init__(self):\n self.points = []\n self.speeds = []\n\n def add_point_speed(self, point, speed):\n self.points.append(point)\n self.speeds.append(speed)\n\n def set_points(self):\n for p in range(len(self.points)):\n self.points[p] += self.speeds[p]\n if self.points[p].x > SCREEN_DIM[0] or self.points[p].x < 0:\n self.speeds[p] = Vec2d(-self.speeds[p].x, self.speeds[p].y)\n if self.points[p].y > SCREEN_DIM[1] or self.points[p].y < 0:\n self.speeds[p] = Vec2d(self.speeds[p].x, -self.speeds[p].y)\n\n def draw_points(self, points, width=3, color=(255, 255, 255)):\n for p in points:\n pygame.draw.circle(gameDisplay, color, p.int_pair(), width)\n\n\nclass Knot(Polyline):\n\n def __init__(self, count):\n super(Knot, self).__init__()\n self.count = count\n\n def add_point_speed(self, point, speed):\n super(Knot, self).add_point_speed(point, speed)\n self.get_knot()\n\n def set_points(self):\n super(Knot, self).set_points()\n self.get_knot()\n\n def get_point(self, points, alpha, deg=None):\n if deg is None:\n deg = len(points) - 1\n if deg == 0:\n return points[0]\n return (points[deg] * alpha + self.get_point(points, alpha, deg - 1) * (1 - alpha))\n\n def get_points(self, base_points):\n alpha = 1 / self.count\n res = []\n for i in range(self.count):\n res.append(self.get_point(base_points, i * alpha))\n return res\n\n def draw_points(self, points, width=3, color=(255, 255, 255)):\n for p_n in range(-1, len(points) - 1):\n pygame.draw.line(gameDisplay, color, points[p_n].int_pair(), points[p_n + 1].int_pair(), width)\n\n def get_knot(self):\n if len(self.points) < 3:\n return []\n res = []\n for i in range(-2, len(self.points) - 2):\n ptn = []\n ptn.append((self.points[i] + self.points[i + 1]) * 0.5)\n ptn.append(self.points[i + 1])\n ptn.append((self.points[i + 1] + self.points[i + 2]) * 0.5)\n res.extend(self.get_points(ptn))\n return res\n\n\ndef draw_help():\n gameDisplay.fill((50, 50, 50))\n font1 = pygame.font.SysFont(\"courier\", 24)\n font2 = pygame.font.SysFont(\"serif\", 24)\n data = []\n data.append([\"F1\", \"Show Help\"])\n data.append([\"R\", \"Restart\"])\n data.append([\"P\", \"Pause/Play\"])\n data.append([\"Num+\", \"More points\"])\n data.append([\"Num-\", \"Less points\"])\n data.append([\"\", \"\"])\n data.append([str(steps), \"Current points\"])\n data.append([\"\", \"\"])\n data.append([\"N\", \"New curve\"])\n data.append([\"F\", \"Faster\"])\n data.append([\"S\", \"Slower\"])\n\n pygame.draw.lines(gameDisplay, (255, 50, 50, 255), True, [\n (0, 0), (800, 0), (800, 600), (0, 600)], 5)\n for i, text in enumerate(data):\n 
gameDisplay.blit(font1.render(\n text[0], True, (128, 128, 255)), (100, 100 + 30 * i))\n gameDisplay.blit(font2.render(\n text[1], True, (128, 128, 255)), (200, 100 + 30 * i))\n\n\nif __name__ == \"__main__\":\n pygame.init()\n gameDisplay = pygame.display.set_mode(SCREEN_DIM)\n pygame.display.set_caption(\"MyScreenSaver\")\n\n steps = 35\n working = True\n polyline = Polyline()\n knots = [Knot(steps)]\n show_help = False\n pause = True\n hue = 0\n color = pygame.Color(0)\n\n while working:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n working = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n working = False\n if event.key == pygame.K_r:\n polyline = Polyline()\n knots = [Knot(steps)]\n if event.key == pygame.K_p:\n pause = not pause\n if event.key == pygame.K_KP_PLUS:\n steps += 1\n if event.key == pygame.K_F1:\n show_help = not show_help\n if event.key == pygame.K_KP_MINUS:\n steps -= 1 if steps > 1 else 0\n if event.key == pygame.K_n:\n knots.append(Knot(steps))\n if event.key == pygame.K_f:\n for knot in knots:\n for speed_index in range(len(knot.speeds)):\n knot.speeds[speed_index] = (knot.speeds[speed_index] * 3)\n if event.key == pygame.K_s:\n for knot in knots:\n for speed_index in range(len(knot.speeds)):\n knot.speeds[speed_index] = (knot.speeds[speed_index] * 0.2)\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n polyline.add_point_speed(Vec2d(event.pos), Vec2d(random.random() * 2, random.random() * 2))\n knots[-1].add_point_speed(Vec2d(event.pos), Vec2d(random.random() * 2, random.random() * 2))\n\n gameDisplay.fill((0, 0, 0))\n hue = (hue + 1) % 360\n color.hsla = (hue, 100, 50, 100)\n polyline.draw_points(polyline.points)\n for knot in knots:\n knot.draw_points(knot.get_knot(), 3, color)\n if not pause:\n polyline.set_points()\n for knot in knots:\n knot.set_points()\n if show_help:\n draw_help()\n\n pygame.display.flip()\n\n pygame.display.quit()\n pygame.quit()\n exit(0)\n\n\n# Реализовать возможность удаления «опорной» точки из кривой","repo_name":"192117/oop_and_patterns_in_python","sub_path":"week2/w2e2.py","file_name":"w2e2.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"35339371442","text":"import argparse\n\nimport mlflow\nimport torch\nfrom aml import CropSegChipsDataModule\nfrom models import ModelPlusSigmoid, SegmentationModel\nfrom pytorch_lightning import Trainer\n\n\ndef parse_args():\n # setup arg parser\n parser = argparse.ArgumentParser()\n\n # add arguments\n parser.add_argument(\"--dataset\", type=str)\n parser.add_argument(\"--onnx_model_path\", type=str)\n parser.add_argument(\"--model_dir\", type=str, default=\"./\")\n parser.add_argument(\"--ndvi_stack_bands\", type=int, default=37)\n parser.add_argument(\"--batch_size\", type=int, default=16)\n parser.add_argument(\"--max_epochs\", type=int, default=10)\n parser.add_argument(\"--learning_rate\", type=float, default=0.001)\n parser.add_argument(\"--weight_decay\", type=float, default=0.001)\n parser.add_argument(\"--num_workers\", type=int, default=4)\n parser.add_argument(\"--num_gpus\", type=int, default=1)\n\n # parse args\n args = parser.parse_args()\n\n # return args\n return args\n\n\ndef main(args: argparse.Namespace):\n # Setup DataLoader\n data = CropSegChipsDataModule(\n data_dir=args.dataset, batch_size=args.batch_size, num_workers=args.num_workers\n )\n data.setup()\n\n # Setup Segmentation Model\n model = SegmentationModel(\n 
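`Knot.get_point` in the screensaver above smooths each three-point segment with the recursive blend `points[deg] * alpha + get_point(..., deg - 1) * (1 - alpha)`, which is close to, but not exactly, a Bézier evaluation. For comparison, a textbook de Casteljau sketch on plain `(x, y)` tuples, runnable without pygame:

```python
def bezier_point(points, alpha):
    """Evaluate a Bezier curve at parameter alpha by repeated interpolation."""
    pts = list(points)
    while len(pts) > 1:
        pts = [(p[0] * (1 - alpha) + q[0] * alpha,
                p[1] * (1 - alpha) + q[1] * alpha)
               for p, q in zip(pts, pts[1:])]
    return pts[0]

print(bezier_point([(0, 0), (1, 2), (2, 0)], 0.5))  # (1.0, 1.0)
```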
lr=args.learning_rate,\n weight_decay=args.weight_decay,\n in_channels=args.ndvi_stack_bands,\n num_epochs=args.max_epochs,\n classes=1,\n )\n\n # Enables logging\n mlflow.pytorch.autolog(log_models=False)\n\n # Train\n trainer = Trainer(max_epochs=args.max_epochs, accelerator=\"gpu\", devices=args.num_gpus)\n trainer.fit(model, data)\n\n # Signature\n batch = next(iter(data.train_dataloader()))\n ndvi_batch = batch[\"image\"]\n ndvi_sample = ndvi_batch[0:1, :, :, :].numpy()\n\n # Set model to inference mode before exporting to ONNX\n trace_model = ModelPlusSigmoid(model).eval()\n dummy_input = torch.randn(\n args.batch_size, args.ndvi_stack_bands, ndvi_sample.shape[-2], ndvi_sample.shape[-1]\n )\n\n # Export the model\n torch.onnx.export(\n trace_model,\n dummy_input, # model example input\n args.onnx_model_path, # where to save the model (can be a file or file-like object)\n export_params=True, # store the trained parameter weights inside the model file\n do_constant_folding=True, # whether to execute constant folding for optimization\n opset_version=11,\n input_names=[\"ndvi_stack\"], # the model's input names\n output_names=[\"seg_map\"], # the model's output names\n dynamic_axes={\n \"ndvi_stack\": {0: \"batch_size\"}, # variable length axes\n \"seg_map\": {0: \"batch_size\"},\n },\n )\n\n # Stop Logging\n mlflow.end_run()\n\n\nif __name__ == \"__main__\":\n # parse args\n args = parse_args()\n\n # run main function\n main(args)\n","repo_name":"microsoft/farmvibes-ai","sub_path":"notebooks/crop_segmentation/notebook_lib/aml_train_script.py","file_name":"aml_train_script.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":549,"dataset":"github-code","pt":"79"} +{"seq_id":"27141470013","text":"\nfrom PIL import Image\n\n# HACK workaround for upstream pillow issue python-pillow/Pillow#400\nimport sys\nfrom python_qt_binding import QT_BINDING_MODULES\nif (\n not QT_BINDING_MODULES['QtCore'].__name__.startswith('PyQt5') and\n 'PyQt5' in sys.modules\n):\n sys.modules['PyQt5'] = None\nfrom PIL.ImageQt import ImageQt\n\nfrom .topic_message_view import TopicMessageView\nimport image_helper\n\nfrom .sandtray_item import SandtrayItem\n\nfrom python_qt_binding.QtCore import Qt, QPointF\nfrom python_qt_binding.QtGui import QPixmap, QColor\nfrom python_qt_binding.QtWidgets import QGraphicsScene, QGraphicsView\n\n\nclass SandtrayView(TopicMessageView):\n name = 'Sandtray'\n\n def __init__(self, timeline, parent, topics):\n super(SandtrayView, self).__init__(timeline, parent, topics[0])\n\n self._items = {}\n\n self._image = None\n self._image_topic = None\n self._image_stamp = None\n self.quality = Image.NEAREST # quality hint for scaling\n\n self._sandtray = SandtrayItem()\n\n self._sandtray_view = QGraphicsView(parent)\n self._sandtray_view.resizeEvent = self._resizeEvent\n self._scene = QGraphicsScene()\n self._scene.addItem(self._sandtray)\n\n self._sandtray_view.setScene(self._scene)\n self._sandtray_view.fitInView(self._scene.itemsBoundingRect(), Qt.KeepAspectRatio)\n\n parent.layout().addWidget(self._sandtray_view)\n\n # MessageView implementation\n def _resizeEvent(self, event):\n self._sandtray_view.fitInView(self._scene.itemsBoundingRect(), Qt.KeepAspectRatio)\n\n def message_viewed(self, bag, msg_details):\n \"\"\"\n render the sandtray\n \"\"\"\n TopicMessageView.message_viewed(self, bag, msg_details)\n topic, msg, t = msg_details[:3]\n if msg:\n if topic == \"/zones\":\n self._sandtray.update_zones(self._get_zones(msg))\n else:\n 
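After an export like the one above, it is cheap to verify that the dynamic `batch_size` axis survived. A hedged sketch using onnxruntime (assumed to be installed; it is not used in the training script itself); the spatial dimensions are placeholders and must match whatever the model was traced with:

```python
import numpy as np
import onnxruntime as ort  # assumed available

def check_onnx(onnx_model_path, bands=37, height=256, width=256):
    sess = ort.InferenceSession(onnx_model_path)
    # A batch of 3 exercises the dynamic "batch_size" axis declared at export.
    dummy = np.random.randn(3, bands, height, width).astype(np.float32)
    (seg_map,) = sess.run(["seg_map"], {"ndvi_stack": dummy})
    assert seg_map.shape[0] == 3, "dynamic batch axis was not preserved"
    return seg_map.shape
```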
for t in msg.transforms:\n if t.header.frame_id == \"sandtray\":\n self._items[t.child_frame_id] = t.transform.translation.x, -t.transform.translation.y\n self._sandtray.update(self._items)\n\n def message_cleared(self):\n TopicMessageView.message_cleared(self)\n self.set_image(None, None, None)\n\n # End MessageView implementation\n\n def _get_zones(self, msg):\n zones = {}\n\n for marker in msg.markers:\n polygon = []\n color = QColor(255*marker.color.r, 255*marker.color.g, 255*marker.color.b, 255*marker.color.a)\n for p in marker.points:\n polygon.append(QPointF(p.x * SandtrayItem.scale, -p.y * SandtrayItem.scale))\n zones.setdefault(color, []).append(polygon)\n\n return zones\n\n def put_image_into_scene(self):\n if self._image:\n QtImage = ImageQt(self._image)\n pixmap = QPixmap.fromImage(QtImage)\n self._scene.clear()\n self._scene.addPixmap(pixmap)\n\n def set_image(self, image_msg, image_topic, image_stamp):\n self._image_msg = image_msg\n if image_msg:\n self._image = image_helper.imgmsg_to_pil(image_msg)\n else:\n self._image = None\n self._image_topic = image_topic\n self._image_stamp = image_stamp\n self.put_image_into_scene()\n","repo_name":"freeplay-sandbox/analysis","sub_path":"src/freeplay_sandbox_analysis/plugins/sandtray_view.py","file_name":"sandtray_view.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"42636125561","text":"import numpy as np\nfrom sklearn import tree\n\n\nclass RandomForest:\n def __init__(self):\n pass\n\n def fit(self, X, y, k, N):\n self.trees = []\n for i in range(k):\n bootstrap = np.random.choice(X.shape[0], size=N, replace=True)\n \n X_ = X.iloc[bootstrap]\n y_ = y.iloc[bootstrap]\n\n dtree = tree.DecisionTreeRegressor()\n dtree.fit(X_, y_)\n self.trees.append(dtree)\n\n def predict(self, X):\n predictions = []\n for tree in self.trees:\n predictions.append(tree.predict(X))\n return np.mean(predictions, axis=0)\n\n def score(self, X, y):\n predictions = self.predict(X)\n return 1 - np.sum((predictions - y)**2) / np.sum((y - np.mean(y))**2)\n","repo_name":"CeceZiegler1/ML_Final_Proj","sub_path":".ipynb_checkpoints/RandomForestRegressor-checkpoint.py","file_name":"RandomForestRegressor-checkpoint.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"42056341698","text":"# 25. - mouredev.weeklychallenge2022 #25\n\"\"\" ROCK, PAPER, SCISSORS\nCrea un programa que calcule quien gana más partidas al piedra, papel, tijera.\n- El resultado puede ser: \"Player 1\", \"Player 2\", \"Tie\" (empate)\n- La función recibe un listado que contiene pares, representando cada jugada.\n- El par puede contener combinaciones de \"R\" (piedra), \"P\" (papel) o \"S\" (tijera).\n- Ejemplo. Entrada: [(\"R\",\"S\"), (\"S\",\"R\"), (\"P\",\"S\")]. Resultado: \"Player 2\". 
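A hedged usage sketch of the bagging regressor above, assuming the `RandomForest` class is in scope; the synthetic data is illustrative only. Note that `predict`'s loop variable `tree` shadows the imported sklearn `tree` module, which is worth renaming if `fit` can ever be called after `predict`.

```python
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X = pd.DataFrame({"x1": rng.normal(size=200), "x2": rng.normal(size=200)})
y = pd.Series(2.0 * X["x1"] - X["x2"] + rng.normal(scale=0.1, size=200))

forest = RandomForest()
forest.fit(X, y, k=10, N=len(X))  # 10 trees, each on a full-size bootstrap sample
print(forest.score(X, y))         # R^2 on the training data, near 1.0 here
```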
\"\"\"\n\nVALUES = [\"R\",\"P\",\"S\"]\n\ndef game(games: list[tuple]):\n player1PTS = 0\n player2PTS = 0\n\n for i in games:\n if type(i) != tuple or len(i) != 2 or (i[0] not in VALUES) or (i[1] not in VALUES):\n return \"error\"\n \n player1 = i[0]\n player2 = i[1]\n\n if player1 != player2:\n #if player1 wins\n if player1 == VALUES[0] and player2 == VALUES[2]:\n player1PTS+=1\n elif player1 == VALUES [2] and player2 == VALUES[1]:\n player1PTS+=1\n elif player1 == VALUES[1] and player2 == VALUES[0]:\n player1PTS+=1\n #if player2 wins\n else:\n player2PTS+=1 \n\n if player1PTS == player2PTS:\n return 'Tie'\n elif player1PTS > player2PTS:\n return 'Player1'\n elif player1PTS < player2PTS:\n return 'Player2'\n \n\n\n# Tests\nprint(game([(\"R\", \"P\"), (\"R\", \"R\"), (\"S\", \"P\")]))\nprint(game([(\"R\", \"P\"), (\"R\", \"S\"), (\"P\", \"R\")]))\nprint(game([(\"R\", \"P\"), (\"P\", \"S\"), (\"P\", \"R\")]))\nprint(game([(0), (\"S\", \"P\"), (\"P\", \"R\"), (\"R\", \"P\")]))\nprint(game([(\"S\", \"P\"), (\"P\", \"R\"), (\"R\", \"P\"), \"R\"]))\n","repo_name":"stv-beep/coding-questions","sub_path":"Python/25RockPaperScissors.py","file_name":"25RockPaperScissors.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22372955333","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom .views import Index, Login, Signup, home, logout, Cart, checkout, Order_View, BootstrapFilterView, Wishlist_View, \\\n Profile, Removal_Wishlist, Removal_Cart, Transfer_from_Cart, Transfer_from_Wishlist, Rating\n\nurlpatterns = [\n path('', home, name=\"Nostalgia_Home\"),\n path('menu', Index.as_view(), name=\"Nostalgia_Menu\"),\n path('signup', Signup.as_view()),\n path('login', Login.as_view(), name='login'),\n path('logout', logout, name='logout'),\n path('cart', Cart.as_view(), name='cart'),\n path('checkout', checkout, name='checkout'),\n path('orders', Order_View.as_view(), name='orders'),\n path('search', BootstrapFilterView, name='search'),\n path('wishlist', Wishlist_View.as_view(), name='wishlist'),\n path('user-profile', Profile.as_view(), name='profile'),\n path('removal', Removal_Wishlist.as_view(), name='removal'),\n path('removal-cart', Removal_Cart.as_view(), name='removal-cart'),\n path('transfer', Transfer_from_Cart.as_view(), name='transfer'),\n path('transfer-wishlist', Transfer_from_Wishlist.as_view(), name='transfer-wishlist'),\n path('rating', Rating.as_view(), name='rating'),\n]\n","repo_name":"satyakinkohli/ashoka-ap1","sub_path":"Store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29611664728","text":"'''\nGiven a binary tree, return the inorder traversal of its nodes' values.\n\nExample:\n\nInput: [1,null,2,3]\n 1\n \\\n 2\n /\n 3\n\nOutput: [1,3,2]\nFollow up: Recursive solution is trivial, could you do it iteratively?\n\n'''\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def inorderTraversal2(self, root: TreeNode) -> List[int]:\n '''# 2: iterate version'''\n if not root:\n return []\n\n stack, res = [], []\n pCur = root\n while pCur or stack: #根节点不为空,则入栈,同时将指针指向左子树,为下一次入栈左子树做准备\n if pCur:\n stack.append(pCur)\n pCur = pCur.left\n else: #根节点为空,则出栈最后一个左树,并将该左树的右子树入栈,为下一次入栈该右子树的左子树做准备\n node = stack.pop()\n 
res.append(node.val)\n pCur = node.right\n\n return res\n\n '''# 1: recursion version'''\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n if not root:\n return []\n\n return self.inorderTraversal(root.left) + [root.val] + self.inorderTraversal(root.right)","repo_name":"Ricky-Hu5918/Python-Lab","sub_path":"94_Binary_Tree_Inorder_Traversal.py","file_name":"94_Binary_Tree_Inorder_Traversal.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"14627103023","text":"import operator\n\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport SentimentAnalisis.prep_data as p\n\nnltk.download('vader_lexicon')\n\nsid = SentimentIntensityAnalyzer()\n\n\ndef sentiment_data_collector(xml_name):\n dictionary = {'neg': 0, 'pos': 0, 'neu': 0, 'compound': 0}\n for sentence in p.parse_xml_for_comments(xml_name):\n ss = sid.polarity_scores(sentence)\n # for k in ss:\n # print('{0}: {1}, '.format(k, ss[k]), end='')\n # print()\n dictionary['neg'] += ss['neg']\n dictionary['pos'] += ss['pos']\n # dictionary['neu'] += ss['neu']\n # dictionary['compound'] += ss['compound']\n main_sentiment_in_data(dictionary, xml_name)\n\n\ndef main_sentiment_in_data(stats, xml_name):\n print('For: ' + xml_name)\n print(stats)\n print('Pos or neg? Answear: ' + max(stats.items(), key=operator.itemgetter(1))[0])\n\n\n#autotagger - treningowe z datadamp\n\nsentiment_data_collector('Comments beer.xml')\nsentiment_data_collector('Comments crypto.xml')\nsentiment_data_collector('Comments android.xml')\n","repo_name":"Belfegor8625/WebMiningProject","sub_path":"SentimentAnalisis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34725475691","text":"\r\n\r\n #********************************************************************************#\r\n # #\r\n # нεℓℓσ,вαтεs! 
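In the sentiment record above, `main_sentiment_in_data` simply picks the larger of the summed `neg` and `pos` masses (the zeroed `neu`/`compound` slots stay in the dict but never win). A hedged alternative: VADER's `compound` score is already normalised to [-1, 1], so its mean plus the conventional ±0.05 cut-offs gives a direct verdict. It would be called with the same `parse_xml_for_comments` output and `SentimentIntensityAnalyzer` instance used above.

```python
def overall_sentiment(sentences, analyzer):
    """Classify a batch of sentences via the mean VADER compound score."""
    sentences = list(sentences)
    if not sentences:
        return "neutral"
    mean_compound = sum(
        analyzer.polarity_scores(s)["compound"] for s in sentences
    ) / len(sentences)
    if mean_compound >= 0.05:    # conventional VADER thresholds
        return "positive"
    if mean_compound <= -0.05:
        return "negative"
    return "neutral"
```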
#\r\n # #\r\n # filename: script_silver #\r\n # created: 2022-05-22 #\r\n # system: Windows #\r\n # version: 64bit #\r\n # by: Bates #\r\n #********************************************************************************#\r\n # import your librarys below #\r\n #********************************************************************************#\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.functions import max as max_\r\n\r\n\r\npath_parametros=\"/home/bates/repositorio/big_data/one_piece/parameters/data.json\"\r\n\r\nspark = SparkSession.builder.master(\"local[1]\").appName(\"local\").getOrCreate()\r\n\r\ndf_parar=spark.read.json(path_parametros)\r\nconfig_spark_mongo = df_parar.agg(max_(\"config_spark_mongo\")).collect()[0][0]\r\npackage_config_spark = df_parar.agg(max_(\"package_config_spark\")).collect()[0][0]\r\nuri = df_parar.agg(max_(\"uri\")).collect()[0][0]\r\nmode = df_parar.agg(max_(\"mode\")).collect()[0][0]\r\nformat = df_parar.agg(max_(\"format\")).collect()[0][0]\r\nsave_bronze = df_parar.agg(max_(\"save_bronze\")).collect()[0][0]\r\nsave_silver = df_parar.agg(max_(\"save_silver\")).collect()[0][0]\r\nsave_gold = df_parar.agg(max_(\"save_gold\")).collect()[0][0]\r\ncursor_execute = df_parar.agg(max_(\"cursor_execute\")).collect()[0][0]\r\nfindspark_sql = df_parar.agg(max_(\"findspark_sql\")).collect()[0][0]\r\nhost = df_parar.agg(max_(\"host\")).collect()[0][0]\r\nuser = df_parar.agg(max_(\"user\")).collect()[0][0]\r\npassw = df_parar.agg(max_(\"pass\")).collect()[0][0]\r\ndriver_sql = df_parar.agg(max_(\"driver_sql\")).collect()[0][0]\r\n\r\n\r\n# extract\r\nprint(\"#\"*100)\r\n\r\ndf = spark.read.parquet(save_bronze).createOrReplaceTempView(\"df\")\r\n\r\n\r\n######################2º STEP #################################################\r\n\r\ndf = spark.sql(\"\"\"SELECT * FROM \r\n(SELECT id, dia, data_captura, apelido, descricao, \r\nREPLACE(kanji, \"Alcunha:Komurasaki\", \"none\") as kanji,\r\nnome_completo,\r\nREPLACE(mes, 'zembro', 'dezembro') as mes,\r\nREPLACE(recompensa, ',', '.') as recompensa, url,\r\nrow_number() OVER (PARTITION BY id ORDER BY data_captura) \r\nas row_id FROM df WHERE TRIM(id) <> '')\r\nWHERE row_id = 1 \"\"\").createOrReplaceTempView('df_temp')\r\n\r\ndf_final = spark.sql(\"\"\"SELECT REPLACE(uuid(), '-','') as id,\r\ndata_captura, TRIM(dia) as dia_aniversario, TRIM(mes) as mes_aniversario, apelido, descricao, kanji, nome_completo, recompensa, url FROM df_temp\"\"\")\r\n\r\n\r\n\r\nprint(\"#\"*100)\r\n\r\ndf_final.write.mode(mode).format(format).partitionBy(\"data_captura\").save(save_silver)\r\n","repo_name":"batestin1/onepiece","sub_path":"scripts/script_silver.py","file_name":"script_silver.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"32734404934","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[74]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nSCRAPERAPI_KEY = 'c44fbbcb4363f1808a24874749b876' \nproxies = {\n 'http': f'http://scraperapi:{SCRAPERAPI_KEY}@proxy-server.scraperapi.com:8001',\n}\n\ndef extract(page):\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'}\n url = f'https://www.indeed.com/jobs?q=data%20engineer&l=New%20York%2C%20NY&start={page}&vjk=97d70eb2f37da8d3'\n r = requests.get(url, headers, proxies=proxies, verify=False)\n soup = BeautifulSoup(r.content, 'lxml')\n return 
soup\n\ndef transform(soup):\n divs_title = soup.find_all('div' , class_ = 'slider_container')\n for item in divs_title:\n job_title = item.find('h2' , class_ = 'jobTitle').text\n company = item.find('span', class_ = 'companyName').text\n location = item.find('div', class_ ='companyLocation').text \n try:\n salary = item.find('span' , class_ = 'salary-snippet').text\n except:\n salary = ''\n summary = item.find('div' , class_ = 'job-snippet').text\n \n job = {\n 'job_title' : job_title,\n 'company' : company,\n 'location' : location,\n 'salary' : salary,\n 'summary' : summary\n }\n joblist.append(job)\n return\n \n\njoblist = [] \n\nfor i in range(0,20,10):\n print(f'Getting page, {i}')\n c=extract(i)\n transform(c)\n \ndf = pd.DataFrame(joblist)\nprint(df.head())\ndf.to_csv('Indeed_Jobs_Sep.csv')\n\n\n# In[ ]:\n","repo_name":"msk-data/web_scraping","sub_path":"Indeed_Job_Search.py","file_name":"Indeed_Job_Search.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5969139132","text":"from socket import *\nimport ctypes\nimport pickle\n\nimport threading\n\nfrom app.server_models import *\nfrom app.global_constants import *\nfrom app.client.sprites import *\n\nclass Network():\n def __init__(self, ip, port):\n self.client = socket(AF_INET, SOCK_STREAM)\n self.server = ip\n self.port = port\n self.addr = (ip, port)\n self.my_player_data = None\n self.enemy_player_data = None\n\n #Conecta ao servidor e recebe pacote correspondente ao jogador\n def connect(self):\n try:\n #conexão\n self.client.settimeout(5)\n self.client.connect(self.addr)\n self.client.settimeout(None)\n except:\n print(f\"Error trying to connect\")\n self.client.close()\n quit()\n\n try:\n #pacote do jogador\n my_player = pickle.loads(self.client.recv(BUFFER_SIZE))\n print(f\"Received initial player obj pid: {my_player.pid}\")\n self.my_player_data = my_player\n\n return my_player\n except:\n print(f\"Error receiving intial player data\")\n self.client.close()\n quit()\n\n def send_pid_is_ready(self):\n self.client.send(pickle.dumps(Command(POST_PID_IS_READY, self.my_player_data)))\n\n def send_selected_map(self, selected_map):\n self.client.send(pickle.dumps(Command(POST_GAME_MAP, selected_map)))\n\n def send_game_reset(self):\n self.client.send(pickle.dumps(Command(POST_GAME_RESET, (self.my_player_data.pid, self.my_player_data.life))))\n\n def send_bullet_data(self, bullet_data):\n self.client.send(pickle.dumps(bullet_data))\n\n def send_player_data(self, bullet_data):\n self.client.send(pickle.dumps(bullet_data))\n\n def start_receive(self, game):\n new_thread = threading.Thread(target=self.receive, args=(id(game),))\n new_thread.start()\n\n def receive(self, game):\n game = ctypes.cast(game, ctypes.py_object).value\n while game.state != END_STATE:\n if(game.state == GET_MAP_STATE):\n self.client.send(pickle.dumps(Command(GET_GAME_MAP)))\n data = self.client.recv(BUFFER_SIZE)\n game_map = pickle.loads(data)\n\n if(type(game_map) == int):\n if(game_map != -1):\n game.map = game_map\n print(f\"PLAYER {self.my_player_data.pid} RECEBEU O MAPA {game.map}\")\n game.state = AWAIT_PLAYERS_STATE\n\n if(game.state == AWAIT_PLAYERS_STATE):\n self.client.send(pickle.dumps(Command(GET_READY_PLAYERS)))\n data = self.client.recv(BUFFER_SIZE)\n players = pickle.loads(data)\n\n if(type(players) == list):\n if(len(players) == N_PLAYERS):\n print(f\"PLAYER {self.my_player_data.pid} SAID GAME IS READY\")\n self.enemy_player_data = [x for 
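In `extract` above, `requests.get(url, headers, proxies=..., verify=False)` passes the headers dict as the second positional argument, which `requests.get` interprets as query `params`, so the User-Agent is never actually sent as a header. A hedged corrected sketch (proxy setup elided; `raise_for_status` added so error pages fail loudly instead of being parsed):

```python
import requests
from bs4 import BeautifulSoup

def extract(page, proxies=None):
    headers = {"User-Agent": "Mozilla/5.0"}
    url = ("https://www.indeed.com/jobs?q=data%20engineer"
           f"&l=New%20York%2C%20NY&start={page}")
    r = requests.get(url, headers=headers, proxies=proxies, timeout=30)
    r.raise_for_status()  # surface HTTP errors before parsing
    return BeautifulSoup(r.content, "lxml")
```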
x in players if x.pid != self.my_player_data.pid][0]\n game.state = TRADE_UPDATES_STATE\n\n if(game.state == TRADE_UPDATES_STATE):\n try:\n data = self.client.recv(BUFFER_SIZE)\n\n if data:\n server_pkt = pickle.loads(data)\n \n if type(server_pkt) is PlayerData and game.enemy_player:\n game.network.enemy_player_data = server_pkt\n game.enemy_player.update_enemy()\n\n if type(server_pkt) is BulletData:\n b = Bullet(game, server_pkt.pos, server_pkt.dir, server_pkt.dx, server_pkt.dy, server_pkt.pid)\n\n if server_pkt.pid == game.my_player.pid:\n game.alliebullets.add(b)\n else:\n game.enemybullets.add(b)\n\n game.all_sprites.add(b)\n\n if type(server_pkt) is Command:\n if(server_pkt.type == POST_GAME_RESET):\n if(server_pkt.data[0] != self.my_player_data.pid):\n self.enemy_player_data.life = server_pkt.data[1]\n game.enemy_player.life = server_pkt.data[1]\n game.reset()\n except:\n print(\"ERROR RECEIVING ON TRADING STATE\")\n print(\"MATOU A NETWORK\")\n","repo_name":"FelipeMarra/combat-multiplayer-python","sub_path":"app/client/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32814712289","text":"x, e = map(float, input('Enter x and e: ').split())\r\nshx = 0\r\nn = 0\r\na = 0\r\nfact = 1\r\nwhile True:\r\n for i in range(1, 2 * n + 2):\r\n fact *= i\r\n a0 = a\r\n a = x ** (2 * n + 1) / fact\r\n diff = abs(a - a0)\r\n shx += a\r\n n += 1\r\n fact = 1\r\n if not (diff > e):\r\n break\r\nprint(shx)","repo_name":"Malbof/Omran-Mohamad-OP","sub_path":"lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10191150625","text":"import logging\n\nfrom mmcv.runner import get_dist_info\n\n\ndef get_root_logger(log_file=None, log_level=logging.INFO):\n \"\"\"Get the root logger.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If `log_file` is specified, a FileHandler will\n also be added. The name of the root logger is the top-level package name,\n e.g., \"mmdet\".\n\n Args:\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the root logger.\n log_level (int): The root logger level. Note that only the process of\n rank 0 is affected, while other processes will set the level to\n \"Error\" and be silent most of the time.\n\n Returns:\n logging.Logger: The root logger.\n \"\"\"\n logger = logging.getLogger(__name__.split('.')[0]) # i.e., mmdet\n # if the logger has been initialized, just return it\n if logger.hasHandlers():\n return logger\n\n format_str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(format=format_str, level=log_level)\n rank, _ = get_dist_info()\n if rank != 0:\n logger.setLevel('ERROR')\n elif log_file is not None:\n file_handler = logging.FileHandler(log_file, 'w')\n file_handler.setFormatter(logging.Formatter(format_str))\n file_handler.setLevel(log_level)\n logger.addHandler(file_handler)\n\n return logger\n\n\ndef print_log(msg, logger=None, level=logging.INFO):\n \"\"\"Print a log message.\n\n Args:\n msg (str): The message to be logged.\n logger (logging.Logger | str | None): The logger to be used. 
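The sinh Taylor series in lab3.py above rebuilds the full factorial on every pass. Each term can instead be derived from the previous one, since term_n = term_{n-1} · x² / ((2n)(2n+1)); a hedged sketch with `math.sinh` as a reference value:

```python
import math

def sinh_series(x, eps):
    term = x       # n = 0 term: x**1 / 1!
    total = term
    n = 0
    while abs(term) > eps:
        n += 1
        term *= x * x / ((2 * n) * (2 * n + 1))  # ratio of consecutive terms
        total += term
    return total

x = 1.5
print(sinh_series(x, 1e-9), math.sinh(x))  # the two agree to ~1e-9
```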
Some\n special loggers are:\n - \"root\": the root logger obtained with `get_root_logger()`.\n - \"silent\": no message will be printed.\n - None: The `print()` method will be used to print log messages.\n level (int): Logging level. Only available when `logger` is a Logger\n object or \"root\".\n \"\"\"\n if logger is None:\n print(msg)\n elif logger == 'root':\n _logger = get_root_logger()\n _logger.log(level, msg)\n elif isinstance(logger, logging.Logger):\n logger.log(level, msg)\n elif logger != 'silent':\n raise TypeError(\n 'logger should be either a logging.Logger object, \"root\", '\n '\"silent\" or None, but got {}'.format(logger))\n","repo_name":"WXinlong/SOLO","sub_path":"mmdet/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":1632,"dataset":"github-code","pt":"79"} +{"seq_id":"8482042813","text":"import copy\nimport os\n\nimport matplotlib.pyplot as plt\nimport traci\n\nfrom optimization.Agent import Agent\nfrom controllers.Environment import Environment\nfrom Utils import Utils\n\nDEFAULT_SCHEDULE = [2, 2, 2, 2]\n\ndef getPicture(filename, data, yLabel, xLabel):\n plt.plot(data)\n plt.ylabel(yLabel)\n plt.xlabel(xLabel)\n plt.margins(0)\n minValue = min(data)\n maxValue = max(data)\n plt.ylim(minValue - 0.05 * abs(minValue), maxValue + 0.05 * abs(maxValue))\n fig = plt.gcf()\n fig.set_size_inches(20, 11.25)\n fig.savefig(os.path.join('plot', filename), dpi=96)\n plt.close(\"all\")\n\ndef preTraining(env, agent):\n phaseCombinations = []\n for phase2 in range(1, 4):\n for phase3 in range(1, 4):\n if phase3 != phase2:\n for phase4 in range(1, 4):\n if phase4 != phase3 and phase4 != phase2:\n phaseCombinations.append([0, phase2, phase3, phase4])\n\n schedules = [DEFAULT_SCHEDULE]\n for dominantPhase in range(4):\n schedule = copy.deepcopy(DEFAULT_SCHEDULE)\n schedule[dominantPhase] -= 1\n schedules.append(schedule)\n for increaseBy in range(1, 4):\n schedule = copy.deepcopy(DEFAULT_SCHEDULE)\n schedule[dominantPhase] += increaseBy\n schedules.append(schedule)\n\n for phaseCombination in phaseCombinations:\n for schedule in schedules:\n print(\"Pre-episode with schedule\", schedule, \"and phase combination\", phaseCombination, \"started...\")\n\n actionIndex = 0\n stepsTaken = 0\n done = False\n observation = env.reset(preTraining=True, training=True)\n\n while not done:\n action = phaseCombination[actionIndex]\n newObservation, reward, done = env.step(action)\n agent.remember(observation, action, reward, newObservation)\n observation = newObservation\n\n stepsTaken += 1\n if stepsTaken >= schedule[actionIndex]:\n stepsTaken = 0\n actionIndex = (actionIndex + 1) % 4\n\n env.runner.endConnection()\n print(\"Pre-episode ended.\")\n\n agent.saveReplayBuffer()\n agent.updateNetwork(preTraining=True, useAverage=True)\n agent.updateNetworkBar(preTraining=True)\n\n return agent\n\nif __name__ == \"__main__\":\n env = Environment()\n agent = Agent(alpha=Utils.ALPHA.value, numberOfActions=Utils.NUMBER_OF_ACTIONS.value,\n batchSize=Utils.BATCH_SIZE.value,\n inputDimensions=Utils.INPUT_DIMENSIONS.value, memorySize=Utils.MEMORY_SIZE.value,\n filename=Utils.MODEL_FILENAME.value, memoryFilename=Utils.MEMORY_FILENAME.value,\n learningStepsToTake=Utils.LEARNING_STEPS.value)\n\n if Utils.LOAD_MODEL.value:\n agent.loadModel()\n else:\n print(\"Starting pre-optimization...\")\n agent = preTraining(env, agent)\n print(\"End pre-optimization\")\n\n scores = []\n losses = []\n print(\"Starting 
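A hedged usage sketch of the two helpers above; the import path is an assumption (in mmdet-style projects they are typically re-exported from `mmdet.utils`), and on a non-zero rank `get_root_logger` silences everything below ERROR as the docstring states:

```python
import logging
from mmdet.utils import get_root_logger, print_log  # import path assumed

logger = get_root_logger(log_file="train.log", log_level=logging.INFO)
print_log("starting epoch 1", logger="root")  # routed through the root logger
print_log("plain stdout message")             # logger=None falls back to print()
```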
optimization...\")\n for learningStep in range(Utils.STARTING_STEP.value, Utils.LEARNING_STEPS.value):\n print(\"Learning step #\", learningStep, \"started.\")\n done = False\n score = 0.0\n negativeReward = 0.0\n minLoss = float('inf')\n observation = env.reset(preTraining=False, training=True)\n counter = 0\n currentTime = 0\n while not done:\n action = agent.chooseAction(observation, traci.simulation.getTime())\n newObservation, reward, done = env.step(action)\n if env.runner.freshChanged:\n counter += 8\n else:\n counter += 5\n score += reward\n if reward < 0.0:\n negativeReward += reward\n agent.remember(observation, action, reward, newObservation)\n observation = newObservation\n\n if counter > Utils.UPDATE_PERIOD.value:\n currentTime += counter\n counter = 0\n print(\"Current time:\", currentTime, \"Learning step:\", learningStep)\n minLoss = min(agent.updateNetwork(preTraining=False, useAverage=False), minLoss)\n agent.updateNetworkBar(preTraining=False)\n\n env.runner.endConnection()\n agent.saveModel()\n print('Learning step #', learningStep, ' had negative reward %.2f' % negativeReward)\n print('Learning step #', learningStep, ' had total reward %.2f' % score)\n print('Learning step #', learningStep, ' epsilon =', agent.epsilon)\n losses.append(minLoss)\n scores.append(score)\n print('Current scores:', scores)\n print('Current losses:', losses)\n\n print(\"Training ended.\")\n\n getPicture('obtained_score.png', scores, 'Scores', 'Learning Steps')\n getPicture('obtained_loss.png', losses, 'Losses', 'Learning Steps')\n","repo_name":"916-Preda-Andrei/DiplomaThesisApp","sub_path":"traci/Training.py","file_name":"Training.py","file_ext":"py","file_size_in_byte":4908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73326047936","text":"\n'''\n2018-5-30\n决策树\n'''\nimport numpy as np\nfrom math import log\n\ndef createDataSet():\n dataSet = [\n [1, 1, 'yes'],\n [1, 1, 'yes'],\n [1, 0, 'no'],\n [0, 1, 'no'],\n [0, 1, 'no']]\n labels = ['no surfacing', 'flippers']\n return dataSet, labels\n\n# 计算给定数据集的香农熵\ndef calcShannonEnt(dataSet):\n # 数据集样本数\n numEntries = len(dataSet)\n # 定义字典,用于计数\n labelCounts = {}\n # 从数据集中,每次取出一行\n for featVec in dataSet:\n # 取出每一行的最后一列,即 'yes' or 'no'\n currentLabel = featVec[-1]\n # 判断 'yes' or 'no' 是否在字典中,不在加入计数为0,在则计数加1\n if currentLabel not in labelCounts.keys():\n labelCounts[currentLabel] = 0\n labelCounts[currentLabel] += 1\n # 定义容器,存放 熵\n shannonEnt = 0.0\n # 依据信息熵公式,计算该 dataSet 的信息熵 \n for key in labelCounts:\n prob = float(labelCounts[key]) / numEntries\n shannonEnt -= prob * log(prob, 2)\n # 返回 dataSet 的信息熵\n return shannonEnt\n\n# 按照给定特征划分数据集\n# dataSet 数据集\n# axis  列号\n# value 将列号为axis,值为value的 其他数据分个出来,看实例\ndef splitDataSet(dataSet, axis, value):\n retDataSet = []\n for featVec in dataSet:\n if featVec[axis] == value:\n reducedFeatVec = featVec[:axis] \n reducedFeatVec.extend(featVec[axis + 1 : ])\n retDataSet.append(reducedFeatVec)\n return retDataSet \n\n# 选择最好的数据集划分方式\ndef chooseBestFeatureToSplit(dataSet):\n # 每一行最后一列为 label,计算 feature 列数\n numFeatures = len(dataSet[0]) - 1\n #计算整个数据集的信息熵\n baseEntropy = calcShannonEnt(dataSet)\n # bestInfoGain 信息增益预先设为为 0.0\n # bestFeature 最好划分的列标签,预先设为 -1\n bestInfoGain = 0.0; bestFeature = -1\n # 依据 feature 数进行循环\n for i in np.arange(numFeatures):\n # 将每一行数据取出,存为list,次数为feature数,即没有最后一列label\n featList = [example[i] for example in dataSet]\n # set 去冗余\n uniqueVals = set(featList)\n # 定义熵容器\n newEntropy = 0.0\n # 从 uniqueVals 集合中迭代\n for value in 
uniqueVals:\n # 从每一列开始,按值划分数据集\n subDataSet = splitDataSet(dataSet, i, value)\n # 见公式\n prob = len(subDataSet) / float(len(dataSet))\n newEntropy += prob * calcShannonEnt(subDataSet) \n infoGain = baseEntropy - newEntropy\n # 找到信息增益最大的列号\n if infoGain > bestInfoGain:\n bestInfoGain = infoGain\n bestFeature = i\n # 返回信息增益最大的列号\n return bestFeature\n\n\n# 如果数据集已经处理了所有属性,但是类标签依然不是唯一的,此时我们需要决定如何定义该叶子节点,\n# 在这种情况下,我们通常会采用 多数表决的方法决定该叶子节点的分类\ndef majorityCnt(classList):\n # 定义一个用于计数的字典\n # 注,此时classList 只有一列,为类标签,因为类标签不唯一,才用此方法找最多的label\n classCount = {}\n # 从 classList 迭代取值\n for vote in classList:\n # 如果从classList中取出的值不在classCount字典中,则将该值放入字典,计数为1,否则在字典中的该值计数加1\n if vote not in classCount.keys():\n classCount[vote] = 0\n classCount[vote] += 1\n \n # 找到字典中 value 最大的 key 并返回\n newvalue = -1\n for key in classCount:\n if newvalue < classCount[key]:\n newkey = key\n newvalue = classCount[key]\n return newkey\n\n# 创建树的函数代码\ndef createTree(dataSet, labels): # 两个输入参数-- 数据集, 标签列表\n # 将 dataSet 最后一列放入 classList\n classList = [example[-1] for example in dataSet]\n # 如果类别完全相同则停止继续划分\n if classList.count(classList[0]) == len(classList):\n return classList[0] \n \n # 如果数据集已经处理了所有属性,但是类标签依然不是唯一的,采用 多数表决的方法决定该叶子节点的分类\n if len(dataSet[0]) == 1: \n return majorityCnt(classList) \n \n # 得到最好划分,也就是信息增益最大的列号\n bestFeat = chooseBestFeatureToSplit(dataSet)\n # 将 信息增益最大的列的列名存入 bestFeatLabel\n bestFeatLabel = labels[bestFeat]\n # 定义树,存为字典形式\n myTree = {bestFeatLabel:{}}\n # 将信息增益最大的列名删除\n del(labels[bestFeat])\n \n # 将信息增益最大的列取出\n featValues = [example[bestFeat] for example in dataSet]\n # 去除冗余\n uniqueVals = set(featValues)\n # 迭代取值\n for value in uniqueVals:\n # 这行代码复制了类标签\n subLabels = labels[:] \n # 递归创建树\n myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels) # 字典的嵌套\n # 返回创建好的树\n return myTree\n\n# 使用决策树的分类函数\n# inputTree 创建好的决策树\n# featLabels 存放feature名的list\n# testVec 预测的feature\ndef classify(inputTree, featLabels, testVec):\n # 取出决策树的key,存为list,并取第一个key\n firstStr = list(inputTree.keys())[0]\n # 取出第一个key所对应的value\n secondDict = inputTree[firstStr]\n # 取出 firstStr 所在的列号\n featIndex = featLabels.index(firstStr)\n # 这段代码为递归找到类别,依次递归向下找\n for key in secondDict.keys():\n if testVec[featIndex] == key:\n if isinstance(secondDict[key], dict):\n classLabel = classify(secondDict[key], featLabels, testVec)\n else:\n classLabel = secondDict[key]\n return classLabel\n\nif __name__ == '__main__':\n myDat, labels = createDataSet()\n print(labels)\n\n myTree = createTree(myDat, labels)\n print(myTree)\n\n # 经过 createTree 已经把labels给破坏了,所以现在要从新获取labels\n myDat, labels = createDataSet()\n print(classify(myTree, labels, [1, 0]))\n print(classify(myTree, labels, [1, 1]))","repo_name":"cb-guo/Machine-Learning","sub_path":"机器学习实战/第3章 决策树/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":6485,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"30914138595","text":"from registro_ig import app\nfrom datetime import datetime\nfrom flask import render_template,request,redirect\nfrom registro_ig.models import *\n\n\n#funcion que controla los errores\ndef validatePurchase(requestForm):\n moneda_to=requestForm['moneda_to']\n moneda_from=requestForm['moneda_from']\n cantidad_from=requestForm['cantidad_from']\n cantidad_realcry = (money_back(moneda_from)[0][0]) - (invested_money(moneda_from)[0][0])\n errores=[]\n if moneda_from != \"EUR\" and consult_currencies(moneda_from) == False:#evito resultado notype\n errores.append(\"Moneda 
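`calcShannonEnt` above tallies the labels by hand before applying H = -Σ p·log₂(p). A hedged equivalent using `collections.Counter`, checked against the record's own toy dataset:

```python
from collections import Counter
from math import log2

def shannon_entropy(dataset):
    counts = Counter(row[-1] for row in dataset)  # last column is the class label
    n = len(dataset)
    return -sum((c / n) * log2(c / n) for c in counts.values())

data = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
print(shannon_entropy(data))  # ~0.9710 for the 2/5 'yes' vs 3/5 'no' split
```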
inexsistente en su cartera\")\n if moneda_from == moneda_to:\n errores.append(\"Moneda invalida: No puede intercambiar valores con la misma mondeda\")\n if moneda_from != \"EUR\" and float(cantidad_from) > cantidad_realcry:#calculo si tengo en realidad la cantidad que pongo en cantidad_from\n errores.append(\"Cantidad insuficiente, o moneda inexsistente en su cartera\")\n if float(cantidad_from) <= 0:\n errores.append(\"No puede introducir cantidad inferior a 1\")\n \n return errores\n\n\n@app.route(\"/\")#Home\ndef index():\n\n registros = select_all()#importo todo el registro\n\n return render_template(\"index.html\", page = \"Inicio\", pageTitle=\"Home\",data=registros)#data esta en index con jinja, y aqui creamos la variable asignandole la lista de diccionario data_mov\n\n\n\n@app.route(\"/purchase\",methods=[\"GET\",\"POST\"])\ndef purchase():\n \n \n if request.method == \"GET\":\n \n return render_template(\"purchase.html\",page =\"Compra\",form={})\n \n else:#entra nel post y son 2 POST\n cantidad_from=float(request.form['cantidad_from'])\n moneda_from=request.form['moneda_from']\n moneda_to=request.form['moneda_to']\n cantidad_to=request.form['cantidad_to']\n \n if 'calcular' in request.form:#primer boton y primer post\n\n errores = validatePurchase(request.form)\n if errores:#si hay errores no calcula\n return render_template(\"purchase.html\",msgError=errores, page =\"Compra\",cantidad_from=cantidad_from, cantidad_to=cantidad_to, form={})\n \n else:\n \n cambio =change_from_to(moneda_from,moneda_to)#peticion api del intercambio\n \n precio_unitario = cantidad_from/cambio\n cantidad_to = cambio\n \n lista_request={\n \"moneda_from\":request.form['moneda_from'],\n \"moneda_to\":request.form['moneda_to'],\n \"cantidad_from\":request.form['cantidad_from'],\n \"cantidad_to\":str(cambio),\n \"precio_unitario\":str(precio_unitario)\n }#declaro la lista de los argumentos del form para que se devuelvan en los botones \n \n return render_template(\"purchase.html\", page =\"Compra\", cantidad_to=cambio, form=lista_request, pageTitle=\"Invertir\", msgError=errores, precio_unitario=precio_unitario)\n \n if 'comprar' in request.form:#segunda peticion \"POST\", finalmente capturo horas y fecha\n\n fecha = datetime.now().strftime('%Y-%m-%d')\n horas = datetime.now().strftime('%H:%M:%S')\n \n insert([ fecha,\n horas,\n request.form['moneda_from'],\n request.form['cantidad_from'],\n request.form['moneda_to'],\n request.form['cantidad_to'] ])#funcion que captura y registra los datos en la base de datos\n\n return redirect(\"/\")#devuelve a la home, donde se averiguerá la transacion efectuada\n \n\n\n@app.route(\"/status\", methods=[\"GET\",\"POST\"] )\ndef resume():\n if request.method == \"GET\":#calculos sobre la base de datos\n invertido=invested_money(\"EUR\")[0][0]\n recuperado=money_back(\"EUR\")[0][0]\n valor_compra= (invested_money(\"EUR\")[0][0] - money_back(\"EUR\")[0][0])\n valor_actual=current_value()#funcion que captura el valor actual de euros con las cryptomonedas que efectivamente se poseen\n \n return render_template(\"status.html\", page =\"Estado\", valor_actual=valor_actual, invertido=invertido, recuperado=recuperado,valor_compra=valor_compra, form={})\n \n else:\n if 'reiniciar' in request.form:#he puesto un boton para poder poner a cero la tabla y volver a empezar la inversion\n delete_all()\n return 
redirect(\"/\")","repo_name":"Tittiola/Proyecto_Flask_classic_Viola_M","sub_path":"registro_ig/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14262866356","text":"import json\nimport typing\nimport boto3\n\nfrom botocore.exceptions import ClientError, ParamValidationError\n\n\nclass DynamoDbException(Exception):\n pass\n\n\nclass AwsConnectorContext:\n \"\"\"\n Class represents connector context for AWS resources\n \"\"\"\n\n def __init__(\n self,\n region: str,\n account_id: str,\n access_key_id: str,\n secret_access_key: str\n ):\n self.region = region\n self.account_id = account_id\n self.access_key_id = access_key_id\n self.secret_access_key = secret_access_key\n\n\nclass DynamoDbConnector:\n \"\"\"\n Class represents connector for AWS DynamoDB table\n \"\"\"\n\n def __init__(\n self,\n table_name: str,\n context: AwsConnectorContext\n ):\n self.table_name = table_name\n self.context = context\n self._table = None\n\n @property\n def table(self):\n if not self._table:\n dynamodb = boto3.resource(\n 'dynamodb',\n aws_access_key_id=self.context.access_key_id,\n aws_secret_access_key=self.context.secret_access_key,\n region_name='us-west-2',\n )\n self._table = dynamodb.Table(\n self.table_name\n )\n return self._table\n\n def fetchone(self, key: dict) -> typing.Dict:\n try:\n response = self.table.get_item(\n Key=key\n )\n return response.get('Item')\n except (\n ParamValidationError,\n ClientError\n ) as err:\n raise DynamoDbException(err) from err\n\n def put_item(self, data: dict):\n self.table.put_item(\n Item=data\n )\n\n\nclass SnsConnector:\n \"\"\"\n Class represents connector for AWS SNS topic\n \"\"\"\n\n def __init__(\n self,\n topic_name: str,\n context: AwsConnectorContext\n ):\n self.topic_name = topic_name\n self.context = context\n self._client = None\n\n @property\n def topic_arn(self):\n return 'arn:aws:sns:{}:{}:{}'.format(\n self.context.region,\n self.context.account_id,\n self.topic_name\n )\n\n @property\n def client(self):\n if not self._client:\n self._client = boto3.client(\n 'sns', self.context.region\n )\n return self._client\n\n def publish(self, msg: str):\n response = self.client.publish(\n TopicArn=self.topic_arn,\n Message=json.dumps(\n {\n 'default': json.dumps(msg)\n }\n ),\n MessageStructure='json'\n )\n return response\n","repo_name":"dmitrikuksik/watchdog","sub_path":"watchdog/core/connectors.py","file_name":"connectors.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29195266178","text":"'''\nCreated on Tue Oct 9 22:52:59 2018\n\n@author: Grant, Pierce\n'''\n\n\nimport os\nimport nltk\n#nltk.download('stopwords')\n#nltk.download('wordnet')\nimport re\nimport string\nimport sklearn as sl \nimport pandas as pd\nimport gensim as gs\n\nos.chdir(r\"C:\\\\Users\\\\pseco\\\\Documents\\\\GitHub\\\\Text-Mining\\\\\") \n#os.chdir(\"C:\\\\Users\\\\gflemin\\\\Documents\\\\GitHub\\\\Text-Mining\\\\\")\n\ndf = pd.read_csv(r'US_Strat_sample.csv') # what's up with the r's?\n\n\ndf['description'] = df['description'].str.replace('\\d+', '')\nvariety = list(df['variety'])\ndoc = []\ndescriptions = df['description']\ntype(descriptions) # made a series\n\n\ndf.head()\nfor description in descriptions:\n doc.append(description)\n \n# remove punctuation\n#nltk.download('punkt')\npunc = re.compile('[%s]' % 
re.escape(string.punctuation))\nterm_vec = []\nfor d in doc: \n d = d.lower()\n d = punc.sub('', d)\n term_vec.append(d)\n\n#print(term_vec[0:10])\n\n#tokenize\ntoken_term_vec=[]\nfor elm in term_vec:\n token_term_vec.append(nltk.word_tokenize(elm))\n \nprint(token_term_vec[0:10])\n\n# add words that might be relevant stop words\nstopwords = nltk.corpus.stopwords.words('english')\nmorewords = ['drink', 'show', 'touch', 'mouth', 'feel', 'nose', 'drinks', 'wine', 'wines', \n 'wine', 'wines', 'grape', 'grapes', 'note', 'notes', 'aroma', 'aromas', 'palate', 'finish', 'taste', \n 'tastes', 'show', 'flavour', 'flavor', 'flavors', 'flavours', 'fruit', '%', '.', '-', '-.','(',')']\nstopwords.extend(morewords)\n\n#print(stopwords)\n\n#remove stop words\n#nltk.download('stopwords')\nnsw_term_vec = []\nfor review in token_term_vec: \n wr=[]\n for word in review:\n if word not in stopwords:\n wr.append(word) \n nsw_term_vec.append(wr)\n \nprint(nsw_term_vec[0:10])\n\n# Lemmatize data\n#nltk.download('wordnet')\nwnl = nltk.stem.WordNetLemmatizer()\n\ncln_term_vec = []\nfor reviews in nsw_term_vec:\n lemma=[]\n for words in reviews:\n lemma.append(wnl.lemmatize(words))\n cln_term_vec.append(lemma)\n \nprint(cln_term_vec[0:10])\n\n#Gen Sim Dictionary\n \ndict_corp = gs.corpora.Dictionary(cln_term_vec)\n\ncorpus=[]\nfor i in range(0, len(cln_term_vec)):\n corpus.append(dict_corp.doc2bow(cln_term_vec[i]))\n\n\n\n#Create TFIDF Vectors Based on term Vectors\n\ntfidf_model = gs.models.TfidfModel(corpus)\n\ntfidf=[]\nfor i in range(0,len(corpus)):\n tfidf.append(tfidf_model[corpus[i]])\n \n#Create Pairwise Document Siliarity Index\nn=len(dict_corp)\nindex = gs.similarities.SparseMatrixSimilarity(tfidf_model[corpus], num_features = n) \n\n\n#TFIDF Values per Document\n\n'''\nfor i in range(0,len(tfidf)):\n s='Review' + ' ' + str(i+1)+' TFIDF'\n \n for j in range(0, len(tfidf[i])):\n s = s + ' (' + dict.get(tfidf[i][j][0]) + ',' \n s = s + ('%.3f' % tfidf[i][j][1]) + ')'\n'''\n \n# Produces Document Simalarity\n\n#Repeat Process stem and tokenize\nfor i in range(0, len(corpus)):\n sim = index[tfidf_model[corpus[i]]]\n for j in range(0, len(sim)):\n sim[j] \n\n#cluster analysis\ndef tokenize_and_stem(text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token\n tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n stems = [wnl.lemmatize(t) for t in filtered_tokens]\n return stems \n\ndef tokenize_only(text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token\n tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search('[a-zA-Z]', token):\n filtered_tokens.append(token)\n return filtered_tokens\n\ntotalvocab_stemmed = []\ntotalvocab_tokenized = []\nfor i in doc:\n allwords_stemmed = tokenize_and_stem(i) #for each item in 'synopses', tokenize/stem\n totalvocab_stemmed.extend(allwords_stemmed) #extend the 'totalvocab_stemmed' list\n \n allwords_tokenized = tokenize_only(i)\n totalvocab_tokenized.extend(allwords_tokenized)\n#Define Vectorizer Parameters\nimport sklearn as sk\nfrom 
sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.spatial.distance import cdist\nfrom sklearn.cluster import KMeans\nfrom sklearn.externals import joblib\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#computes similarity\ntfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.2, stop_words=stopwords, use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))\n\ntfidf_matrix = tfidf_vectorizer.fit_transform(doc)\n\n#Subset Matrix and compute distance\npart_one = tfidf_matrix\ndist_matrix_one = 1-cosine_similarity(part_one)\n\n\n\nterms= tfidf_vectorizer.get_feature_names()\n\n#Cluster Analysis\nnum_clusters = 10\n\nkm = KMeans(n_clusters=num_clusters)\n\nkm.fit(tfidf_matrix)\n\nclusters=km.labels_.tolist()\n\nclusters = km.labels_.tolist()\n\n#plot use elbow method to determine cluster\n\nmms=sk.preprocessing.MaxAbsScaler()\nmms.fit(tfidf_matrix)\ntransformed_matrix = mms.transform(tfidf_matrix)\n\nsum_of_squared_dist = []\nKl=range(1,50)\n\nfor k in Kl:\n km = KMeans(n_clusters=k)\n km = km.fit(transformed_matrix)\n \n sum_of_squared_dist.append(km.inertia_)\n\nplt.plot(Kl, sum_of_squared_dist, 'bx-')\nplt.xlabel('K')\nplt.ylabel('sum_of_squared_dist')\nplt.title('optimal k')\nplt.show()\n\n#Visualize Cluster\nreviews = {'Wine':variety, 'Reviews':doc,'cluster':clusters}\n\ncluster_frame=pd.DataFrame(reviews, index = [clusters], columns = ['Wine', 'cluster'])\n\nprint(cluster_frame[0:10])\n\n\n#subset matrix\n \n#create data frame that has the result of the MDS plus the cluster numbers and titles\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom sklearn.manifold import MDS\nfrom IPython import get_ipython\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nMDS()\n\n# convert two components as we're plotting points in a two-dimensional plane\n# \"precomputed\" because we provide a distance matrix\n# we will also specify `random_state` so the plot is reproducible.\nmds = MDS(n_components=2, dissimilarity=\"precomputed\", random_state=1)\n\npos = mds.fit_transform(dist_matrix_one) # shape (n_components, n_samples)\n\nxs, ys = pos[:, 0], pos[:, 1]\nprint()\nprint()\n\ndf_2 = pd.DataFrame(dict(x=xs, y=ys, clusters=clusters, title=variety)) \n\n#group by cluster\ngroups = df_2.groupby('clusters')\n\n# set up plot\nfig, ax = plt.subplots(figsize=(20, 20)) # set size\nax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\n#iterate through groups to layer the plot\n#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label\nfor name, group in groups:\n ax.plot(group.x, group.y, marker='o', linestyle='', ms=12, \n label=clusters, \n mec='none')\n ax.set_xlim([-.9,.9])\n ax.set_ylim([-.9,.9])\n ax.set_aspect('auto')\n ax.tick_params(\\\n axis= 'x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=True, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=True)\n ax.tick_params(\\\n axis= 'y', # changes apply to the y-axis\n which='both', # both major and minor ticks are affected\n left=True, # ticks along the bottom edge are on\n top=False, # ticks along the top edge are off\n labelleft=True\n )\n\nfor i in range(len(df_2)):\n ax.text(df_2.loc[i]['x'], df_2.loc[i]['y'], df_2.loc[i]['clusters'], size=24) \n\n \nplt.show() #show the plot\n\ncentroids=km.cluster_centers_\n\ncentroids_x = 
centroids[:,0]\ncentroids_y=centroids[:,1]\n\nplt.scatter(centroids_x, centroids_y, color='black', label=clusters)","repo_name":"glflemin/Text-Mining","sub_path":"Text_mining_hw1_Cluster_Code.py","file_name":"Text_mining_hw1_Cluster_Code.py","file_ext":"py","file_size_in_byte":8130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"4784390499","text":"# f(i) - maksymalny zysk zcięcia do i-tego drzewa\n\n# f(i) = max (f(i-2) + value[i], f(i-1))\n\ndef lumberjack(trees):\n n = len(trees)\n x2 = trees[0]\n x1 = max(trees[0], trees[1])\n for i in range(2, n):\n x1, x2 = max(x2 + trees[i], x1), x1\n return x1\n\n\narr = [5, 1, 12, 14, 2, 2, 2, 19, 1, 7]\nprint(lumberjack(arr))\n","repo_name":"karpinsk/ASD","sub_path":"Dynamic programming/cutting_trees.py","file_name":"cutting_trees.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7993926181","text":"INF = 10 ** 10\n\nclass Solution(object):\n def maxSumDivThree(self, nums):\n dp = [0, -INF, -INF]\n for num in nums:\n nxt = dp[:]\n for i in xrange(3):\n j = (num + i) % 3\n nxt[j] = max(nxt[j], dp[i] + num)\n dp = nxt[:]\n #print dp\n return dp[0]\n","repo_name":"Wizmann/ACM-ICPC","sub_path":"Leetcode/Algorithm/python/2000/01262-Greatest Sum Divisible by Three.py","file_name":"01262-Greatest Sum Divisible by Three.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"79"} +{"seq_id":"15018600340","text":"\ndef modify(s):\n return(s.replace(\"he\",\" Modified \"))\n #s=\"modify\"\n\ns1= \"hello\"\nprint(\"s1 = \",s1)\ns3=modify(s1)\nprint(\"after modify(s), s1= \",s1, \" s3 = \",s3)\ns2 = s1.replace(\"o\",\"\")\nprint(\"after s1.replace, s1= \",s1, \" s2 = \", s2)\n\n#----------------------------------------------------------------------\n# var assignment\na = [1, 2, 3]\nb = a\nb.append(4)\nb = ['a', 'b']\nprint(a, b)\n\nd=[1,2,3,5]\nc = [1, 2, 3]\nd = c\nd.append(4)\nd = ['c', 'd']\nprint(c, d)\n\n#----------------------------------------------------------------------\ndef foo(a):\n print(id(a))\n a.append(1)\n\na = []\nprint(id(a))\nfoo(a)\nprint(a)\n\n#----------------------------------------------------------------------\n'''\nThe parameter passed in is actually a reference to an object,\nbut the reference is passed by value.\n'''\ndef foo1(a):\n print('In fool, a id',id(a))\n a.append(1)\n # re-assignment\n a = [1, 2]\n\na = []\nprint('In fool, a id',id(a))\nfoo1(a)\nprint(a)\n\n#----------------------------------------------------------------------\ndef try_to_change_list_reference1(the_list):\n print('got', the_list)\n the_list = ['and', 'we', 'can', 'not', 'lie']\n print('set to', the_list)\n\nouter_list = ['we', 'like', 'proper', 'English']\n\nprint('\\nbefore, outer_list =', outer_list)\ntry_to_change_list_reference1(outer_list)\nprint('after, outer_list =', outer_list)\n\n#----------------------------------------------------------------------\ndef try_to_change_string_reference2(the_string):\n print('got', the_string)\n the_string = 'In a kingdom by the sea'\n print('set to', the_string)\n\nouter_string = 'It was many and many a year ago'\n\nprint('\\nbefore, outer_string =', outer_string)\ntry_to_change_string_reference2(outer_string)\nprint('after, outer_string =', 
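The lumberjack recurrence above is the classic "no two adjacent picks" maximum, f(i) = max(f(i-2) + value[i], f(i-1)); note that it indexes trees[1], so it assumes at least two trees. A hedged brute-force cross-check over all valid index subsets, practical only for small inputs:

```python
from itertools import combinations

def lumberjack_brute(trees):
    best = 0
    n = len(trees)
    for r in range(1, n + 1):
        for idx in combinations(range(n), r):
            if all(b - a > 1 for a, b in zip(idx, idx[1:])):  # no neighbours
                best = max(best, sum(trees[i] for i in idx))
    return best

arr = [5, 1, 12, 14, 2, 2, 2, 19, 1, 7]
print(lumberjack_brute(arr))  # 47, matching lumberjack(arr)
```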
outer_string)\n\n#----------------------------------------------------------------------\n","repo_name":"mike03052000/python","sub_path":"Training/HackRank/Level-0/func-1.py","file_name":"func-1.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"39139276107","text":"\n#predicting and submission file\npred_test_y = np.mean([outputs[i][1] for i in range(len(outputs))], axis = 0)\npred_test_y = (pred_test_y > best_thresh).astype(int)\n\n\nsub = pd.read_csv('../input/sample_submission.csv')\nout_df = pd.DataFrame({\"qid\":sub[\"qid\"].values})\nout_df['prediction'] = pred_test_y\nout_df.to_csv(\"submission.csv\", index=False)\n","repo_name":"Kalyankr/Insincere-questions-classification","sub_path":"output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"42170322378","text":"import numpy as np\r\nfrom scipy.linalg import eigh\r\nfrom sklearn import preprocessing\r\nimport matplotlib.pyplot as plt\r\nfrom skimage.segmentation import slic, mark_boundaries\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch_geometric.nn import GATv2Conv, EdgeConv\r\nimport spectral as spy\r\nimport os\r\n\r\ndef SELF(HSI, train_idx, K: int, eigs_thres: float , beta: float):\r\n num_total, bands = HSI.shape\r\n train_set = HSI[train_idx[:, 0], :]\r\n num_train = train_set.shape[0]\r\n total_2 = np.linalg.norm(HSI, axis=1)**2\r\n train_2 = np.linalg.norm(train_set, axis=1)**2\r\n total_sum = np.matmul(train_set, HSI.T)\r\n total_sum = np.tile(total_2[None],(num_train,1)) + np.tile(train_2[None].T,(1,num_total)) -2.0*total_sum\r\n total_sum = np.sqrt(np.where(total_sum>=0, total_sum,0))\r\n _ind = np.argsort(total_sum, axis=1)\r\n sigma_train = np.zeros(num_train)\r\n for i in range(num_train):\r\n sigma_train[i] = total_sum[i, _ind[i, K]]\r\n num_class= int(np.amax(train_idx[:, 1]))\r\n num_perclass = np.zeros(num_class,dtype='int')\r\n for i in range(num_class):\r\n idx = np.where(train_idx[:, 1]==i+1)[-1]\r\n num_perclass[i] = len(idx)\r\n W_lb = np.zeros((num_train,num_train))\r\n W_lw = np.zeros((num_train,num_train))\r\n for i in range(num_train):\r\n p_cur = train_set[i, :]\r\n p_lab = train_idx[i][1]\r\n for j in range(i, num_train):\r\n if train_idx[j][1] == p_lab:\r\n tmp1 = (1.0/num_train-1.0/num_perclass[p_lab-1])*\\\r\n np.exp(-np.linalg.norm(p_cur- train_set[j, :])**2/(sigma_train[i]*sigma_train[j]))\r\n tmp2 = (1.0/num_perclass[p_lab-1])*np.exp(-np.linalg.norm(p_cur- train_set[j, :])**2\r\n /(sigma_train[i]*sigma_train[j]))\r\n else:\r\n tmp1 = 1.0/num_train\r\n tmp2 = 0.0\r\n W_lb[i, j] = W_lb[j,i] = tmp1\r\n W_lw[i, j] = W_lw[j,i] = tmp2\r\n\r\n S_lb = np.matmul(np.matmul(train_set.T,np.diag(np.sum(W_lb, axis=1))-W_lb),train_set)\r\n S_lw = np.matmul(np.matmul(train_set.T,np.diag(np.sum(W_lw, axis=1))-W_lw),train_set)\r\n mu = (np.sum(HSI, axis=0)/num_total)[:, None]\r\n S_t = np.matmul(HSI.T, HSI)- np.matmul(mu,mu.T)*num_total\r\n S_rlb = (1.0-beta)*S_lb + beta*S_t\r\n S_rlw = (1.0-beta)*S_lw + beta*np.eye(bands)\r\n eigs_val, vec_eigs = eigh(S_rlb, S_rlw)\r\n\r\n compress_num = int(np.min(np.where(np.flip(eigs_val).cumsum(0) / np.sum(eigs_val) > eigs_thres))+1)\r\n\r\n T = np.flip(vec_eigs[:, -compress_num:], axis=1)\r\n #T = np.multiply(T, np.tile(np.sqrt(np.flip(eigs_val[-compress_num:])[None]), (bands,1)))\r\n return 
np.matmul(HSI, T)\r\n\r\ndef image_show(data, Height, Width):\r\n    minMax = preprocessing.MinMaxScaler()\r\n    img = minMax.fit_transform(data[:, :3])\r\n    img = np.reshape(img, [Height, Width, 3])\r\n    plt.imshow(img)\r\n    plt.pause(1)\r\n\r\ndef Draw_Classification_Map(label, name: str, scale: float = 4.0, dpi: int = 400):\r\n    '''\r\n    get classification map , then save to given path\r\n    :param label: classification label, 2D\r\n    :param name: saving path and file's name\r\n    :param scale: scale of image. If equals to 1, then saving-size is just the label-size\r\n    :param dpi: default is OK\r\n    :return: null\r\n    '''\r\n    fig, ax = plt.subplots()\r\n    numlabel = np.array(label)\r\n    v = spy.imshow(classes=numlabel.astype(np.int16), fignum=fig.number)\r\n    ax.set_axis_off()\r\n    ax.xaxis.set_visible(False)\r\n    ax.yaxis.set_visible(False)\r\n    fig.set_size_inches(label.shape[1] * scale / dpi, label.shape[0] * scale / dpi)\r\n    foo_fig = plt.gcf()  # 'get current figure'\r\n    plt.gca().xaxis.set_major_locator(plt.NullLocator())\r\n    plt.gca().yaxis.set_major_locator(plt.NullLocator())\r\n    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\r\n    plt.pause(1)\r\n    foo_fig.savefig(name + '.png', format='png', transparent=True, dpi=dpi, pad_inches=0)\r\n\r\ndef SegmentsLabelProcess(labels):\r\n    '''\r\n    Post-process labels to avoid discontinuous label values\r\n    '''\r\n    labels = np.array(labels, np.int64)\r\n    H, W = labels.shape\r\n    ls = list(set(np.reshape(labels, [-1]).tolist()))\r\n\r\n    dic = {}\r\n    for i in range(len(ls)):\r\n        dic[ls[i]] = i\r\n\r\n    new_labels = labels\r\n    for i in range(H):\r\n        for j in range(W):\r\n            new_labels[i, j] = dic[new_labels[i, j]]\r\n    return new_labels\r\n\r\ndef SLIC_seg(HSI, Datacube, segments_list):\r\n    H, W, bands = Datacube.shape\r\n\r\n    minMax2 = preprocessing.MinMaxScaler()\r\n    HSI = np.reshape(HSI,[H*W,3])\r\n    HSI = minMax2.fit_transform(HSI)\r\n    HSI = np.reshape(HSI,[H,W,3])\r\n    num_scale = len(segments_list)\r\n    node_feature_list = []\r\n    edge_index_list = []\r\n    Qmatrix_list = []\r\n    for k in range(num_scale):\r\n        segments = slic(Datacube, n_segments=segments_list[k], compactness=0.1)\r\n        if segments.max() + 1 != len(list(set(np.reshape(segments, [-1]).tolist()))):\r\n            segments = SegmentsLabelProcess(segments)\r\n        show_seg = mark_boundaries(HSI, segments)\r\n        plt.figure()\r\n        plt.imshow(show_seg)\r\n        plt.show()\r\n\r\n        superpixel_num_cur = segments.max() + 1\r\n        segments = np.reshape(segments, [-1])\r\n        Q = np.zeros([H * W, superpixel_num_cur], dtype=np.float32)\r\n\r\n        for i in range(superpixel_num_cur):\r\n            idx = np.where(segments == i)[0]\r\n            Q[idx, i] = 1\r\n        segments = np.reshape(segments, [H, W])\r\n        Affine=np.zeros((superpixel_num_cur,superpixel_num_cur), dtype='int')\r\n        for i in range(H):\r\n            for j in range(W - 1):\r\n                if (segments[i, j] != segments[i, j + 1]):\r\n                    Affine[segments[i, j], segments[i, j + 1]] = Affine[segments[i, j + 1], segments[i, j]] = 1\r\n\r\n        for i in range(H - 1):\r\n            for j in range(W):\r\n                if (segments[i, j] != segments[i + 1, j]):\r\n                    Affine[segments[i, j], segments[i + 1, j]] = Affine[segments[i + 1, j], segments[i, j]] = 1\r\n\r\n        edge_index = np.empty((2, 0), dtype=int)\r\n        for i in range(superpixel_num_cur):\r\n            for j in range(i + 1, superpixel_num_cur):\r\n                if (Affine[i, j] != 0):\r\n                    edge_index = np.concatenate((edge_index, np.array([[i, j], [j, i]])), axis=1)\r\n\r\n        #node_feature_list.append(S)\r\n        edge_index_list.append(edge_index)\r\n        Qmatrix_list.append(Q)\r\n    return edge_index_list, Qmatrix_list\r\n\r\n\r\nclass 
SSConv(nn.Module):\r\n '''\r\n Spectral-Spatial Convolution\r\n '''\r\n def __init__(self, in_ch, out_ch, num_depth_conv_layer, kernel_size=5):\r\n super(SSConv, self).__init__()\r\n self.num_depth_conv_layer = num_depth_conv_layer\r\n self.depth_conv = nn.Sequential()\r\n for i in range(self.num_depth_conv_layer):\r\n self.depth_conv.add_module('depth_conv_'+str(i),nn.Conv2d(in_channels=out_ch, out_channels=out_ch,\r\n kernel_size = kernel_size, stride=1, padding=kernel_size//2, groups=out_ch))\r\n\r\n self.point_conv = nn.Conv2d(\r\n in_channels=in_ch,\r\n out_channels=out_ch,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n groups=1,\r\n )\r\n self.Act = nn.LeakyReLU(inplace=True)\r\n self.BN = nn.BatchNorm2d(in_ch)\r\n\r\n\r\n def forward(self, input):\r\n out = self.point_conv(self.BN(input))\r\n out = self.Act(out)\r\n for i in range(self.num_depth_conv_layer):\r\n out = self.depth_conv[i](out)\r\n out = self.Act(out)\r\n return out\r\n\r\nclass myGNN(torch.nn.Module):\r\n def __init__(self, num_inputfeatures: int, num_outfeatures: int):\r\n super().__init__()\r\n self.BN = nn.BatchNorm1d(num_inputfeatures)\r\n self.conv1 = GATv2Conv(num_inputfeatures, 64, heads=4)\r\n self.conv2 = GATv2Conv(256, num_outfeatures, heads=1)\r\n\r\n def forward(self, x: torch.Tensor, edge_index: torch.Tensor):\r\n x = self.BN(x)\r\n x = self.conv1(x, edge_index)\r\n x = F.leaky_relu(x)\r\n x = self.conv2(x, edge_index)\r\n return x\r\n\r\n\r\nclass MSDesGATnet(nn.Module):\r\n def __init__(self, height: int, width: int, channel: int, class_count: int,\r\n Qmatrix_list, Q_Hat_T_list, edge_index_list, SSConv_num_depth_conv_layer = 1,SSConv_kernel = 5):\r\n super(MSDesGATnet, self).__init__()\r\n self.class_count = class_count\r\n self.channel = channel\r\n self.height = height\r\n self.width = width\r\n self.edge_index_list = edge_index_list\r\n self.Qmatrix_list = Qmatrix_list\r\n self.layer_num = len(Qmatrix_list)\r\n self.Q_list_Hat_T = Q_Hat_T_list\r\n self.GAT_layers = nn.Sequential()\r\n input_num = channel\r\n output_num = channel\r\n for i in range(self.layer_num):\r\n self.GAT_layers.add_module('my_GNN_l'+str(i), myGNN(input_num, output_num))\r\n input_num = input_num + output_num\r\n output_num = input_num\r\n\r\n self._linear = nn.Linear(input_num, self.class_count)\r\n self._CNN_denoise1 = SSConv(channel, channel,num_depth_conv_layer=SSConv_num_depth_conv_layer,kernel_size=SSConv_kernel)\r\n self._CNN_denoise2 = SSConv(output_num, output_num,num_depth_conv_layer=SSConv_num_depth_conv_layer,kernel_size=SSConv_kernel)\r\n\r\n def forward(self, x: torch.Tensor):\r\n (h, w, c) = x.shape\r\n #x0 = x.reshape([h * w, -1])\r\n x = torch.unsqueeze(x.permute([2, 0, 1]), 0)\r\n H_0 = self._CNN_denoise1(x)\r\n H_0 = torch.squeeze(H_0, 0).permute([1, 2, 0])\r\n x_flatten = H_0.reshape([h * w, -1])\r\n\r\n for i in range(self.layer_num):\r\n superpixels_flatten = torch.sparse.mm(self.Q_list_Hat_T[i], x_flatten)\r\n H_i = self.GAT_layers[i](superpixels_flatten, self.edge_index_list[i])\r\n x_flatten = torch.cat([x_flatten, torch.sparse.mm(self.Qmatrix_list[i], H_i)], dim=-1)\r\n\r\n output = self._CNN_denoise2(x_flatten.reshape([1,h,w,-1]).permute([0,3,1,2]))\r\n x_flatten = torch.squeeze(output, 0).permute([1, 2, 0]).reshape([h * w, -1])\r\n Y = self._linear(x_flatten)\r\n return Y\r\n\r\nclass EGNN(nn.Module):\r\n def __init__(self, height: int, width: int, channel: int, class_count: int, out_channel: int,\r\n Qmatrix, Q_Hat_T, edge_index):\r\n super(EGNN, self).__init__()\r\n self.class_count = 
class_count\r\n        self.channel = channel\r\n        self.height = height\r\n        self.width = width\r\n        self.edge_index = edge_index\r\n        self.Qmatrix = Qmatrix\r\n        self.Q_list_Hat_T = Q_Hat_T\r\n        self.GNN1 = EdgeConv(nn.Sequential(nn.Linear(2*channel, out_channel), nn.ReLU(inplace=True)))\r\n        self.GNN2 = EdgeConv(nn.Sequential(nn.Linear(2*out_channel, out_channel), nn.ReLU(inplace=True)))\r\n        self.GNN3 = EdgeConv(nn.Sequential(nn.Linear(2*out_channel, out_channel), nn.ReLU(inplace=True)))\r\n        self.CNN = nn.Conv2d(out_channel, out_channel,kernel_size=3,stride=1,padding=1,groups=1)\r\n        self._linear = nn.Linear(out_channel, self.class_count)\r\n\r\n    def forward(self, x: torch.Tensor):\r\n        (h, w, c) = x.shape\r\n        x_flatten = x.reshape([h * w, -1])\r\n        superpixels_flatten = torch.sparse.mm(self.Q_list_Hat_T, x_flatten)\r\n        H1 = self.GNN1(superpixels_flatten, self.edge_index)\r\n        H2 = self.GNN2(H1, self.edge_index)\r\n        H3 = self.GNN3(H2, self.edge_index)\r\n        H4 = H1 + H2 + H3\r\n        output = torch.sparse.mm(self.Qmatrix, H4).reshape([1,h,w,-1]).permute([0,3,1,2])\r\n        output = self.CNN(output)\r\n        output = torch.squeeze(output, 0).permute([1, 2, 0]).reshape([h * w, -1])\r\n        output = self._linear(output)\r\n        return output\r\n\r\ndef write_acc(OA, AA, producer_acc, kappa, confusion_matrix, filename, method):\r\n    if os.path.exists(filename):\r\n        f = open(filename, 'a+')\r\n    else:\r\n        f = open(filename, 'w')\r\n    str_results = '\\n' + method + ': ======================' \\\r\n                  + \"\\nOA=\" + str(OA) \\\r\n                  + \"\\nAA=\" + str(AA) \\\r\n                  + '\\nkpp=' + str(kappa) \\\r\n                  + '\\nacc per class:' + str(producer_acc) \\\r\n                  + \"\\nconfusion matrix:\" + str(confusion_matrix) + \"\\n\"\r\n\r\n    f.write(str_results)\r\n    f.close()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"l7170/MSDesGAT","sub_path":"HSI_SELF.py","file_name":"HSI_SELF.py","file_ext":"py","file_size_in_byte":12433,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"36287585752","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Data differ code\n__author__ = \"Ivan Begtin (ivan@begtin.tech)\"\n__license__ = \"MIT\"\n\nimport io\nimport bson\nimport json\nimport csv\nimport xxhash\n\n\ndef csv_index(key, csvf, delimiter=\";\"):\n    \"\"\"Index csv file records and return dict with key and hash of record\"\"\"\n    bindex = {}\n    reader = csv.DictReader(csvf, delimiter=delimiter)\n    for r in reader:\n        rh = xxhash.xxh64(str(r)).hexdigest()\n        bindex[r[key]] = rh\n    return bindex\n\n\ndef json_index(key, jsonf):\n    \"\"\"Index json file records and return dict with key and hash of each record\"\"\"\n    bindex = {}\n    complexkey = False\n    if key.find(\".\") > -1:\n        parts = key.split(\".\")\n        complexkey = True\n    for line in jsonf:\n        r = json.loads(line)\n        rh = xxhash.xxh64(bson.BSON.encode(r)).hexdigest()\n        if not complexkey:\n            bindex[r[key]] = rh\n        else:\n            v = r\n            for p in parts:\n                v = v[p]\n            bindex[v] = rh\n    return bindex\n\n\ndef bson_index(key, bsonf):\n    \"\"\"Index bson file records and return dict with key and hash of each record\"\"\"\n\n    bindex = {}\n    for r in bson.decode_file_iter(bsonf):\n        rh = xxhash.xxh64(bson.BSON.encode(r)).hexdigest()\n        # rh = xxhash.xxh64(str(r)).intdigest()\n        bindex[r[key]] = rh\n    return bindex\n\n\ndef dict_index(key, arr):\n    \"\"\"Index python dict records and return dict with key and hash of each record\"\"\"\n    bindex = {}\n    for r in arr:\n        rh = xxhash.xxh64(str(r)).hexdigest()\n        bindex[r[key]] = rh\n    return bindex\n\n\ndef compare_index(lefti, righti):\n    \"\"\"Compares two indexes and 
returns dict with all added, removed and changed records\"\"\"\n setl = set(lefti.keys())\n setr = set(righti.keys())\n diffl = setl.difference(setr)\n diffr = setr.difference(setl)\n inter = setl.intersection(setr)\n changed = []\n for i in inter:\n if lefti[i] != righti[i]:\n changed.append(i)\n report = {\"a\": [], \"c\": [], \"d\": []}\n for i in diffr:\n report[\"a\"].append(i)\n for i in diffl:\n report[\"d\"].append(i)\n for i in changed:\n report[\"c\"].append(i)\n report[\"stats\"] = [len(setl), len(report[\"a\"]), len(report[\"c\"]), len(report[\"d\"])]\n return report\n\n\ndef basediff(key, left, right, difftype=\"csv\"):\n \"\"\"Generates diff report between left and right files\"\"\"\n if isinstance(left, io.IOBase):\n leftf = left\n elif difftype == \"csv\":\n leftf = open(left, \"r\", encoding=\"utf8\")\n else:\n leftf = open(left, \"rb\")\n\n if isinstance(right, io.IOBase):\n rightf = right\n elif difftype == \"csv\":\n rightf = open(right, \"r\", encoding=\"utf8\")\n else:\n rightf = open(right, \"rb\")\n\n if difftype == \"csv\":\n lefti = csv_index(key, leftf)\n righti = csv_index(key, rightf)\n elif difftype == \"bson\":\n lefti = bson_index(key, leftf)\n righti = bson_index(key, rightf)\n elif difftype == \"json\":\n lefti = json_index(key, leftf)\n righti = json_index(key, rightf)\n else:\n return None\n report = compare_index(lefti, righti)\n return report\n\n\ndef bsondiff(key, left, right):\n \"\"\"Returns difference between two bson files by selected unique key\"\"\"\n return basediff(key, left, right, difftype=\"bson\")\n\n\ndef jsondiff(key, left, right):\n \"\"\"Returns difference between two json files by selected unique key\"\"\"\n return basediff(key, left, right, difftype=\"json\")\n\n\ndef csvdiff(key, left, right):\n \"\"\"Returns difference between two csv files by selected unique key\"\"\"\n return basediff(key, left, right, difftype=\"csv\")\n\n\ndef arrdiff(key, left, right):\n \"\"\"Returns difference between two arrays of dicts.\n Each array record should be dict with unique id defined in 'key' variable\"\"\"\n lefti = dict_index(key, left)\n righti = dict_index(key, right)\n return compare_index(lefti, righti)\n\n\nif __name__ == \"__main__\":\n import sys\n\n ext = sys.argv[1].rsplit(\".\", 1)[-1]\n if ext == \"bson\":\n report = bsondiff(sys.argv[3], sys.argv[1], sys.argv[2])\n elif ext in [\"json\", \"jsonl\"]:\n report = jsondiff(sys.argv[3], sys.argv[1], sys.argv[2])\n elif ext == \"csv\":\n report = csvdiff(sys.argv[3], sys.argv[1], sys.argv[2])\n else:\n print(\"Wrong file extension: bson, json or csv supported\")\n sys.exit(1)\n stats = {\"a\": len(report[\"a\"]), \"c\": len(report[\"c\"]), \"d\": len(report[\"d\"])}\n\n print(json.dumps(stats, indent=4))\n","repo_name":"datacoon/datadifflib","sub_path":"datadiff/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":4526,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"44547634255","text":"import django_filters\nfrom backend.stores.models import Store\nfrom datetime import datetime\n\n\nclass StoreFilter(django_filters.FilterSet):\n is_open = django_filters.BooleanFilter(method=\"is_open_filter\")\n\n def is_open_filter(self, queryset, name, value):\n if not value:\n return queryset\n datetime_today = datetime.today()\n time = datetime_today.time()\n queryset = queryset.filter(\n open_times__hours_ranges__start_hour__lte=time,\n open_times__hours_ranges__end_hour__gte=time,\n 
open_times__weekday=datetime_today.weekday()\n        ).distinct()\n        return queryset\n\n    class Meta:\n        model = Store\n        fields = ['is_open']\n","repo_name":"lchorolque/cocos-minimarket","sub_path":"backend/stores/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"71974837376","text":"import json\n\ndef handle(value, sapi):\n    assert isinstance(value, dict)\n    data = value\n\n    response = {}\n    response_data = {}\n\n    success = False\n\n    email = data[\"email\"]\n\n    functions = sapi.get(email + \"_list_grains\", True)\n\n    if functions is not None and functions != \"\":\n        #sapi.log(functions)\n        functions = json.loads(functions)\n        f_list = []\n        for i in functions:\n            f = sapi.get(email + \"_grain_\" + functions[i], True)\n            if f is not None and f != \"\":\n                f = json.loads(f)\n                if \"modified\" not in f:\n                    f[\"modified\"] = 0\n\n                f_list.append(f)\n\n        response_data[\"functions\"] = f_list\n        response_data[\"message\"] = \"Found \" + str(len(f_list)) + \" functions.\"\n\n    else:\n        # no functions yet\n        response_data[\"functions\"] = []\n        response_data[\"message\"] = \"No functions yet.\"\n\n    success = True\n\n    if success:\n        response[\"status\"] = \"success\"\n    else:\n        response[\"status\"] = \"failure\"\n\n    response[\"data\"] = response_data\n\n    sapi.log(json.dumps(response))\n\n    return response\n\n","repo_name":"knix-microfunctions/knix","sub_path":"ManagementService/python/getFunctions.py","file_name":"getFunctions.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":194,"dataset":"github-code","pt":"79"}
+{"seq_id":"11090183517","text":"# -*- coding: utf-8 -*-\n# @Time : 18-8-23 4:52 PM\n# @Author : HeJi\n# @FileName: wrapup.py\n# @E-mail: hj@jimhe.cn\n\nfrom hsi_bert import HSI_BERT\nimport numpy as np\nimport gc\nimport argparse\nimport scipy.io as scio\nfrom utils import get_train_test, get_coordinates_labels, AA_andEachClassAccuracy\nfrom module import KNN\nfrom dataset import Data_Generator, zeropad_to_max_len\nfrom sklearn import preprocessing\nfrom sklearn.metrics import accuracy_score, classification_report, cohen_kappa_score, confusion_matrix\nimport tensorflow as tf\nimport os\nfrom grammar import Grammar, standartizeData, rotation_and_flip, zmm_random_flip, padWithZeros\nfrom utils import timer\n\nselection_rules = [\"rect 11\"]# \"round 4\", \"round 5\", \"round 6\"]\n\n\ndef get_matrics(y_true, y_pred):\n    oa = accuracy_score(y_pred=y_pred, y_true=y_true)\n    cm = confusion_matrix(y_pred=y_pred, y_true=y_true)\n    apc, aa = AA_andEachClassAccuracy(cm)\n    kappa = cohen_kappa_score(y1=y_true, y2=y_pred)\n    apc = np.expand_dims(apc, axis=0)\n    result = {\"oa\":oa, 'aa':aa, \"k\":kappa, \"apc\":apc}\n    return result\n\n\n\ndef get_args():\n    parser = argparse.ArgumentParser()\n    # Add arguments\n    parser.add_argument('--n_epochs', type=int, default=30, help='n_epochs')\n    parser.add_argument(\"--max_depth\", type=int, default=2, help=\"max_depth\")\n    parser.add_argument('--batch_size', type=int, default=128, help=\"num_batches\")\n    parser.add_argument(\"--num_head\", type=int, default=10)\n    parser.add_argument(\"--drop_rate\", type=float, default=0.3)\n    parser.add_argument(\"--attention_dropout\", type=float, default=0.3)\n    parser.add_argument(\"--log_every_n_samples\", type=int, default=5)\n    parser.add_argument(\"--save_model\", type=bool, default=True)\n    parser.add_argument(\"--save_path\", type=str, default=\"models\")\n    
parser.add_argument(\"--test_size\",type=float)\n parser.add_argument(\"--start_learning_rate\", type=float, default=3e-4)\n parser.add_argument(\"--dataset\", type=str, default=\"IN\")\n parser.add_argument(\"--prembed\", type=bool, default=True)\n parser.add_argument(\"--prembed_dim\", type=int, default=100)\n parser.add_argument(\"--data_path\", type=str, default=\"data/IN\")\n parser.add_argument(\"--repeat_term\", type=int, default=10)\n parser.add_argument(\"--is_valid\", type=bool, default=False)\n parser.add_argument(\"--limited_num\", type=int)\n parser.add_argument(\"--num_hidden\", type=int, default=200)\n parser.add_argument(\"--masking\", type=bool, default=False)\n parser.add_argument(\"--pooling\", type=bool, default=False)\n parser.add_argument(\"--pool_size\", type=int, default=3)\n parser.add_argument(\"--data_augment\", type = bool, default=False)\n parser.add_argument(\"--max_len\", type=int,default=121)\n parser.add_argument(\"--test_region\", type=str, default=\"rect 11\")\n\n # 如: python xx.py --foo hello > hello\n args = parser.parse_args()\n return args\n\ndef main():\n print(tf.__version__)\n\n arg = get_args()\n\n print(\"arg.is_valid\", arg.is_valid, \"type(arg.is_valid)\", type(arg.is_valid))\n used_labels = None\n if arg.dataset == \"IN\":\n X = scio.loadmat(\"data/Indian_pines_corrected.mat\")[\"indian_pines_corrected\"]\n y = scio.loadmat(\"data/Indian_pines_gt.mat\")[\"indian_pines_gt\"]\n VAL_SIZE = 1025\n used_labels = [1,2,4,7,9,10,11,13]\n elif arg.dataset == \"PU\":\n X = scio.loadmat(\"data/PaviaU.mat\")[\"paviaU\"]\n y = scio.loadmat(\"data/PaviaU_gt.mat\")[\"paviaU_gt\"]\n VAL_SIZE = 4281\n elif arg.dataset == \"KSC\":\n X = scio.loadmat(\"data/KSC.mat\")[\"KSC\"]\n y = scio.loadmat(\"data/KSC_gt.mat\")[\"KSC_gt\"]\n elif arg.dataset == \"Salinas\":\n X = scio.loadmat(\"data/Salinas_corrected.mat\")[\"salinas_corrected\"]\n y = scio.loadmat(\"data/Salinas_gt.mat\")[\"salinas_gt\"]\n elif arg.dataset == \"Houston\":\n X = scio.loadmat(\"data/houston15.mat\")['data']\n mask_train = scio.loadmat(\"data/houston15_mask_train.mat\")[\"mask_train\"]\n mask_test = scio.loadmat(\"data/houston15_mask_test.mat\")[\"mask_test\"]\n\n\n #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8)\n #X_train, y_train = oversampleWeakClasses(X_train, y_train)\n X = standartizeData(X)\n #X_train, y_train, X_test, y_test = build_data_v2(X, y)\n\n margin = 6\n X = padWithZeros(X, margin=margin)\n\n if arg.dataset == \"Houston\":\n num_classes = 15\n elif used_labels is not None:\n num_classes = len(used_labels)\n else:\n num_classes = len(np.unique(y)) - 1\n xshape = X.shape[1:]\n aoa = []\n aaa = []\n ak = []\n aapc = []\n for repterm in range(arg.repeat_term):\n\n if arg.dataset != \"Houston\":\n coords, labels = get_coordinates_labels(y)\n # train_coords, test_coords, train_labels, test_labels = train_test_split(coords, labels, test_size=arg.test_size)\n\n train_coords, train_labels, test_coords, test_labels = get_train_test(data=coords, data_labels=labels,\n test_size=arg.test_size,\n limited_num=arg.limited_num,\n used_labels=used_labels)\n\n else:\n train_coords, train_labels = get_coordinates_labels(mask_train)\n test_coords, test_labels = get_coordinates_labels(mask_test)\n train_coords = train_coords + margin\n test_coords = test_coords + margin\n #X_test = Grammar(X, test_coords, method=arg.test_region)\n\n X_train = Grammar(X, train_coords, method=\"rect 11\")\n y_train = train_labels\n y_test = test_labels\n\n if arg.data_augment:\n X_train, 
y_train = zmm_random_flip(X_train, y_train) # rotation_and_flip(X_train, y_train)\n # X_train, y_train, X_test, y_test = build_data(X, y)\n X_train_shape = X_train.shape\n #X_test_shape = X_test.shape\n if len(X_train_shape) == 4:\n X_train = np.reshape(X_train, [X_train_shape[0], X_train_shape[1] * X_train_shape[2], X_train_shape[3]])\n #X_test = np.reshape(X_test, [X_test_shape[0], X_test_shape[1] * X_test_shape[2], X_test_shape[3]])\n\n #X_test = zeropad_to_max_len(X_test, max_len=arg.max_len)\n X_train = zeropad_to_max_len(X_train, max_len=arg.max_len)\n \n for i in range(num_classes):\n print(\"num train and test in class %d is %d / %d\" % (i, (y_train == i).sum(), (y_test == i).sum()))\n #print(\"num_train\", X_train.shape[0])\n #print(\"num_test\", X_test.shape[0])\n print(\"num_classes\", num_classes)\n\n train_generator = Data_Generator(X, y=y_train, use_coords=train_coords,\n batch_size=arg.batch_size,\n selection_rules=selection_rules,\n shuffle=True, till_end=False,\n max_len=arg.max_len)\n\n\n test_generator = Data_Generator(X, y=y_test, use_coords=test_coords,\n batch_size=1024,\n selection_rules=[arg.test_region]\n ,shuffle=False,\n till_end=True, max_len=arg.max_len)\n\n\n model = HSI_BERT(max_len = arg.max_len,\n n_channel=xshape[-1],\n max_depth=arg.max_depth,\n num_head=arg.num_head,\n num_hidden=arg.num_hidden,\n drop_rate=arg.drop_rate,\n attention_dropout=arg.attention_dropout,\n num_classes = num_classes,\n start_learning_rate=arg.start_learning_rate,\n prembed=arg.prembed,\n prembed_dim=arg.prembed_dim,\n masking=arg.masking,\n pooling=arg.pooling,\n pool_size=arg.pool_size)\n\n model.build()\n\n print(arg)\n\n save_full_path = None\n\n if arg.save_model:\n if not os.path.exists(arg.save_path):\n os.mkdir(arg.save_path)\n save_full_path = arg.save_path+'/'+arg.dataset+\"/model_%d_h%d_d%d\"%(repterm, arg.num_head, arg.max_depth)+'/'+\"model_%d_h%d_d%d.ckpt\"%(repterm, arg.num_head, arg.max_depth)\n model_path = arg.save_path+'/'+arg.dataset+\"/model_%d_h%d_d%d\"%(repterm, arg.num_head, arg.max_depth)\n if not os.path.exists(model_path):\n os.mkdir(model_path)\n np.save(os.path.join(model_path,\"train_coords.npy\"), train_coords - margin)\n np.save(os.path.join(model_path, \"test_coords.npy\"), test_coords - margin)\n #if arg.dataset == \"Salinas\":\n \"\"\"\n print(\"Fitting generator\")\n with timer(\"Fitting Generator Completed\"):\n model.fit_generator(train_generator,\n nb_epochs = arg.n_epochs,\n log_every_n_samples = arg.log_every_n_samples,\n save_path=save_full_path)\n\n #preds = model.predict_from_generator(test_generator)\n \"\"\"\n print(\"Fitting normal data\")\n with timer(\"Fitting Normal Data Completed\"):\n model.fit(X_train, y_train, batch_size=arg.batch_size,\n nb_epochs=arg.n_epochs,\n log_every_n_samples=arg.log_every_n_samples,\n save_path=save_full_path)\n\n with timer(\"Testing\"):\n preds = model.predict_from_generator(test_generator)\n result = get_matrics(y_true=test_labels, y_pred=preds)\n oa = result['oa']\n aa = result[\"aa\"]\n kappa = result[\"k\"]\n apc = result[\"apc\"]\n print(\"oa\", oa)\n print('aa', aa)\n print(\"kappa\", kappa)\n print(\"apc\", apc.flatten())\n\n best_model = HSI_BERT(max_len = arg.max_len,\n n_channel=xshape[-1],\n max_depth=arg.max_depth,\n num_head=arg.num_head,\n num_hidden=arg.num_hidden,\n drop_rate=arg.drop_rate,\n attention_dropout=arg.attention_dropout,\n num_classes = num_classes,\n start_learning_rate=arg.start_learning_rate,\n prembed=arg.prembed,\n prembed_dim=arg.prembed_dim,\n 
masking=arg.masking,\n                              pooling=arg.pooling,\n                              pool_size=arg.pool_size)\n        best_model.restore(save_full_path)\n\n        #if arg.dataset == \"Salinas\":\n        #    preds = best_model.predict_from_generator(test_generator)\n        #else:\n        preds = best_model.predict_from_generator(test_generator)\n        result = get_matrics(y_pred=preds, y_true=test_labels)\n        oa = result['oa']\n        aa = result[\"aa\"]\n        kappa = result[\"k\"]\n        apc = result[\"apc\"]\n        print(\"oa\", oa)\n        print('aa', aa)\n        print(\"kappa\", kappa)\n        print(\"apc\", apc.flatten())\n        aoa.append(oa)\n        aaa.append(aa)\n        ak.append(kappa)\n        aapc.append(apc)\n        print(classification_report(test_labels, preds))\n    aoa = np.array(aoa)\n    aaa = np.array(aaa)\n    ak = np.array(ak)\n    std_aa = np.std(aaa)\n    std_oa = np.std(aoa)\n    std_ak = np.std(ak)\n    aapc = np.concatenate(aapc, axis=0)\n    print(\"mean oa\", np.mean(aoa))\n    print(\"std_oa\", std_oa)\n    print(\"mean aa\", np.mean(aaa))\n    print(\"std_aa\", std_aa)\n    print(\"mean kappa\", np.mean(ak))\n    print(\"std_ak\", std_ak)\n    print(\"maapc\", np.mean(aapc, axis=0))\n    print(\"maapc_std\", np.std(aapc, axis=0))\n    print(\"below is aapc\")\n    print(aapc)\n\nif __name__ ==\"__main__\":\n    main()\n","repo_name":"hyperji/HSI_BERT","sub_path":"wrapup.py","file_name":"wrapup.py","file_ext":"py","file_size_in_byte":11972,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"79"}
+{"seq_id":"5781807365","text":"import numpy as np\nfrom sklearn.metrics import make_scorer\nimport rampwf as rw\nfrom sklearn import multioutput\nimport xgboost as xgb\nfrom matplotlib import pyplot\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.base import BaseEstimator\nfrom problem import get_train_data\n\ndef EM99(y_true, y_pred):\n    precision=3\n    quant=0.99\n    eps=1e-8\n\n    if (y_pred < 0).any():\n        return float('inf')\n\n    ratio_err = np.array([(p + eps) / t for y_hat, y in zip(y_pred, y_true)\n                          for p, t in zip(y_hat, y) if t != 0])\n    # sorted absolute value of mw2dB ratio err\n    score = np.percentile(np.abs(10 * np.log10(ratio_err)), 100 * quant)\n    return score\n\n\nclass Regressor(BaseEstimator):\n\n    def __init__(self, max_depth=2, n_estimators=60, learning_rate=0.1):\n        super().__init__()\n        self.model = None\n\n        self.max_depth = max_depth\n        self.n_estimators = n_estimators\n        self.learning_rate = learning_rate\n        self.model = multioutput.MultiOutputRegressor(xgb.XGBRegressor(n_estimators=self.n_estimators, max_depth=self.max_depth, learning_rate=self.learning_rate))\n\n    def fit(self, X, y):\n        # Get data and create train data loaders\n        X_56 = []\n        for sample in X:\n            metadata = sample[0]\n            inp = sample[1]\n\n            metadata_input = [0] * 8 * 3\n            for i, j in zip(range(8), range(0, 24, 3)):\n                if i >= len(metadata):\n                    metadata_input[j] = 0\n                    metadata_input[j+1] = 0\n                    metadata_input[j+2] = 0\n                else:\n                    metadata_input[j] = 2 if metadata[i][0] == 'EDFA' else 1\n                    try:\n                        metadata_input[j+1] = metadata[i][1][0]\n                    except:\n                        metadata_input[j+1] = 0\n                    try:\n                        metadata_input[j+2] = metadata[i][1][1]\n                    except:\n                        metadata_input[j+2] = 0\n\n            real_inp = inp + metadata_input\n            X_56.append(np.asarray(real_inp))\n\n        X_56 = np.asarray(X_56)\n        self.model.fit(X_56, y, eval_metric=\"logloss\", verbose=True)\n    \n    def predict(self, X):\n        X_56 = []\n        for sample in X:\n            metadata = sample[0]\n            inp = sample[1]\n\n            metadata_input = [0] * 8 * 3\n            for i, j in zip(range(8), range(0, 24, 3)):\n                if i >= len(metadata):\n                    metadata_input[j] = 0\n                    metadata_input[j+1] = 0\n                    metadata_input[j+2] = 0\n                else:\n                    try:\n                        metadata_input[j] = 2 if 
metadata[i][0] == 'EDFA' else 1\n                    except:\n                        metadata_input[j] = 0 \n                    try:\n                        metadata_input[j+1] = metadata[i][1][0]\n                    except:\n                        metadata_input[j+1] = 0\n                    try:\n                        metadata_input[j+2] = metadata[i][1][1]\n                    except:\n                        metadata_input[j+2] = 0\n\n            X_56.append(inp + metadata_input)\n\n        X_56 = np.asarray(X_56)\n        preds = self.model.predict(X_56)\n        preds = preds * (preds > 0)\n        return preds\n\n\nparameters = [{\n    'max_depth': [3, 4, 5, 6, 7],\n    'n_estimators': [50, 250, 500, 1000],\n    'learning_rate': [0.03, 0.05, 0.08, 0.1]\n}]\nEM99_score = make_scorer(EM99, greater_is_better=False)\n\n'''\nparameters = [{\n    'max_depth': [3, 4],\n    'learning_rate': [0.03, 0.05]\n}]\n'''\n\ngrid_search = GridSearchCV(\n    estimator=Regressor(),\n    param_grid=parameters,\n    scoring = EM99_score,\n    n_jobs = 8,\n    cv = 4,\n    verbose=True\n)\n\nx_train, y_train = get_train_data()\n\ngrid_search.fit(x_train, y_train)\nprint(grid_search.best_score_)\nprint(grid_search.best_params_)\n\n","repo_name":"gpspelle/huawei-AI","sub_path":"grid_search.py","file_name":"grid_search.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"73313476094","text":"from application import db, create_app\nfrom models import *\n\nQUESTION_FILE = \"./files/questions.csv\"\n\ndef main():\n    user1 = User('x', 123, 'x', True)\n    user2 = User('y', 124, 'y', True)\n    user3 = User('z', 125, 'z', True)\n\n    questions = Question.query.all()\n    count = 0\n    for question in questions:\n        answer1 = Answer(question._id, False, 2)\n        answer2 = Answer(question._id, False, 3)\n        answer3 = Answer(question._id, True if count % 2 else False, 3)\n\n        user1._answers.append(answer1)\n        user2._answers.append(answer2)\n        user3._answers.append(answer3)\n\n        db.session.add(answer1)\n        db.session.add(answer2)\n        db.session.add(answer3)\n        count += 1\n    db.session.commit()\n\nif __name__ == \"__main__\":\n    import models\n    app = create_app()\n    main()\n","repo_name":"hilfialkaff/matchmate","sub_path":"add-test-data.py","file_name":"add-test-data.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39139276107","text":"#coding=utf-8\r\nimport unittest\r\nfrom test_mathfunc import TestMathFunc\r\n\r\nif __name__ == '__main__':\r\n    suite = unittest.TestSuite()\r\n    tests = [TestMathFunc(\"test_add\"),TestMathFunc(\"test_minus\"),TestMathFunc(\"test_divide\")]\r\n    # Add the cases that need to run to the Test Suite; cases not added will not be executed\r\n    suite.addTests(tests)\r\n\r\n    with open('UnittestTextReport.txt','a') as f:\r\n        runner = unittest.TextTestRunner(stream=f,verbosity=2)\r\n        runner.run(suite)\r\n\r\n\r\n","repo_name":"cuiboautotest/learnpython3","sub_path":"unittest自动化框架/unittest/test_suite.py","file_name":"test_suite.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"26365165506","text":"import logging\nimport time\nimport webbrowser\n\nfrom pyfiglet import Figlet\n\nfrom logic import Logic\nfrom flask import Flask, render_template, request\nfrom turbo_flask import Turbo\nimport sqlite3 as sl\n\ncode = \"\"\nde = \"\"\n\napp = Flask(__name__)\nobj = Logic()\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n    return render_template(\"index.html\", code=code, de=de)\n\n\n@app.route('/encode', methods=['GET', 'POST'])\ndef encode():\n    global code\n    response = request.args.get(\"q\")\n    code = Logic.encode_to_morse(response)\n    print(code)\n    return 
\"nothing\"\n\n\n@app.route('/decode')\ndef decode():\n global de\n response = request.args.get(\"q\")\n de = Logic.decode_from_morse(response)\n return \"nothing\"\n\n\nif __name__ == '__main__':\n log = logging.getLogger('werkzeug')\n log.setLevel(logging.ERROR)\n text = Figlet(font=\"digital\")\n print(text.renderText(\"MORSE\"))\n time.sleep(1)\n webbrowser.open(\"http://127.0.0.1:5000\")\n app.run()\n","repo_name":"andrew-leshs/morse","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17584906145","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nimport pymongo\n# 1.7版本以下导入settings的方法\n# from scrapy.conf import settings\n# 1.7版本以上引入settings的方法\nfrom scrapy.utils.project import get_project_settings\n\n\nclass ZufangSpiderPipeline:\n \"\"\"\n # 存入mongodb\n import pymongo\n\n collection_name = 'scrapy_items'\n\n def __init__(self):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n\n @classmethod\n def from_crawler(cls,crawler):\n return cls(\n mongo_uri = crawler.settings.get('MONGO_URI'),\n mongo_db = crawler.settings.get('MONGO_DATABASE', 'items')\n )\n\n def open_spider(self,spider):\n self.client = pymongo.MongoClient(self.mongo_uri)\n self.db = self.client(self.mongo_db)\n\n def close_spider(self, spider):\n self.client.close()\n\n def process_item(self, item, spider):\n self.db[self.collection_name].insert_one(dict(item))\n return item\n\n \"\"\"\n \"\"\"\n # 存入mysql\nclass MysqlPipeline():\n def __init__(self, host, database, user, password, port):\n self.host = host\n self.database = database\n self.user = user \n self.password = password\n self.port = port\n \n @classmethod\n def from_crawler(cls,crawler):\n return cls(\n host = crawler.settings.get('MYSQL_HOST'),\n database = crawler.settings.get('MYSQL_DATABASE'),\n user = crawler.settings.get('MYSQL_USER'),\n password = crawler.settings.get('MYSQL_PASSWORD'),\n port = crawler.settings.get('MYSQL_PORT') \n )\n def open_spider(self, spider):\n self.db = pymysql.connect(self.host, self.user, self.password, self.database, charset='utf8', port=self.port):\n self.cursor = self.db.cursor()\n \n def close_spider(self, spider):\n self.db.close()\n \n def process_item(self, item, spider):\n print(item['title'])\n data = dict(item)\n keys = ','.join(data.keys())\n values = ','.join(['%s'] * len(data))\n sql = 'insert into %s (%s) values (%s)' % (itme.table, keys, values)\n self.cursor.excute(sql,tuple(data.values()))\n self.db.commit()\n return item\n \n \"\"\"\n \"\"\"\n # 去重\nfrom scrapy.exceptions import DropItem\n\nclass DuplicatesPipeline(object):\n def __init__(self):\n self.ids_seen = set()\n \n def process_item(self, item, spider):\n if item['id'] in self.ids_seen:\n raise DropItem(\"Duplicate item found:%s\" % item)\n else:\n self.ids_seen.add(item['id'])\n return item\n \n \"\"\"\n\n def __init__(self):\n settings = get_project_settings()\n host = settings['MONGODB_HOST']\n port = settings['MONGODB_PORT']\n db_name = settings['MONGODB_DBNAME']\n client = pymongo.MongoClient(host=host, port=port)\n db = client[db_name]\n self.post = db[settings['MONGODB_DOCNAME']]\n\n # 数据持久化的操作\n def process_item(self, item, spider):\n zufang = dict(item)\n 
self.post.insert(zufang)\n return item\n","repo_name":"Jonescy/Scrapy","sub_path":"zufang_spider/zufang_spider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"74435454336","text":"import numpy as np\nimport pickle\nimport glob\nimport ipdb\nimport glob\nst = ipdb.set_trace\ndata_mod = \"bb\"\ntake_glob = True\nnum_boxes = []\nname_classes = []\nif take_glob:\n\troot_location = \"/projects/katefgroup/datasets/\"\n\tfolder_name = f\"{root_location}/replica_processed/npy/{data_mod}/*\"\t\n\t# txt_file_train = f\"{root_location}/replica_processed/npy/{data_mod}t.txt\"\n\tfile_list = glob.glob(folder_name)\n\tall_classes = []\n\tfor file in file_list:\n\t\tpickled_file = pickle.load(open(file,\"rb\"))\n\t\tbbox_origin = pickled_file[\"bbox_origin\"]\n\t\tclasses = pickled_file['object_category_names']\n\t\tall_classes = all_classes + classes\n\t\tnum_bbox_origin = len(pickled_file[\"bbox_origin\"])\n\t\tcategory_names = pickled_file[\"object_category_names\"]\n\t\tnum_boxes.append(num_bbox_origin)\n\t\tname_classes.append(category_names)\n\tnum_boxes = np.array(num_boxes)\n\tname_classes = np.array(name_classes)\n\tbox_ind = np.argmax(num_boxes)\n\tunique_classes = set(all_classes)\n\tprint(unique_classes,len(unique_classes))\n\tprint(name_classes[box_ind])\n\tprint(np.max(num_boxes))\n\t# st()\n\t# print(max(num_boxes))\n\t# pickled_file[]","repo_name":"HARPLab/gastronomy","sub_path":"perception/pytorch_disco/scripts/get_max_objects.py","file_name":"get_max_objects.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"28242920175","text":"from django.shortcuts import render, redirect\nfrom .forms import FoodForm\nfrom .models import Food\n# from django.contrib.auth.forms import UserCreationForm\n# from django.contrib.auth import authenticate,login\n\nfrom django.contrib import messages\n\ndef add_food(request):\n if request.method == 'POST':\n form = FoodForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('food_list')\n else:\n form = FoodForm()\n return render(request, 'trekar/add_food.html', {'form' : form})\n\ndef food_list(request):\n foods = Food.objects.all()\n return render(request, 'trekar/food_list.html', {'foods': foods})\n\ndef edit_food(request,food_id):\n food = Food.objects.get(id = food_id)\n if request.method == 'POST':\n form = FoodForm(request.POST,instance=food)\n if form.is_valid():\n form.save()\n return redirect('food_list')\n else:\n form = FoodForm()\n return render(request, 'trekar/edit_food.html', {'form' : form})\n\ndef delete_food(request, food_id):\n food = Food.objects.get(id = food_id)\n food.delete()\n return redirect('food_list')\n\n# def register(request):\n# if request.method == 'POST':\n# form = UserCreationForm(request.POST)\n# print(\"hello\")\n# if form.is_valid():\n# form.save()\n# Username=form.cleaned_data.get('username')\n# messages.success(request, f'account created for a {Username}')\n# return redirect('Login')\n# else:\n# form=UserCreationForm()\n# return render(request,'registrastion/register.html',{'form':form})\n \n# def user_login(request):\n# if request.method == 'POST':\n# form=LoginForm(request.POST)\n# if form.is_valid():\n# form.save()\n# username=form.cleaned_data['username']\n# password=form.cleaned_data['password']\n# 
user=authenticate(username=username,password=password)\n#             if user is not None :\n#                 login(request,user)\n#                 return redirect('food_list')\n#             else:\n#                 form.add_error(None,{'error':'not correct password'})\n#         else:\n#             form=LoginForm()\n#     return render(request,'registrastion/user_login.html',{'form': form})\n\n\n    \n\n\n","repo_name":"diptidudhat778/calory_tracker","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"12302647218","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom pacfree_searcher.constants import STATES, DATES_SOURCE\nfrom pacfree_searcher.models import Base, PrimaryDate\n\n\nclass DatesParser():\n    \"\"\"Handles the details of parsing the election dates and adding them to\n    the database.\"\"\"\n    def __init__(self):\n        \"\"\"Sets up the connection to the database and calls `get_dates`\"\"\"\n        engine = create_engine('sqlite:///dates.db')\n        Base.metadata.bind = engine\n        self.db = sessionmaker(bind=engine)()\n        self.get_dates()\n\n\n    def get_dates(self):\n        \"\"\"Finds all primary dates and calls `add_date` to add them to the\n        database.\"\"\"\n        resp = requests.get(DATES_SOURCE)\n        html = BeautifulSoup(resp.text, 'lxml')\n        data = html.find('table').find_all('td')\n        state, date = (None, None)\n        for i in range(0, len(data)):\n            if i % 2 == 0:\n                state = data[i].string\n                assert date is None\n                continue\n            date = data[i].string\n            entry = {\n                'state': state,\n                'abbr': STATES[state],\n                'date': date\n            }\n            self.add_entry(entry)\n            date = None\n\n\n    def add_entry(self, entry):\n        \"\"\"Add primary date entry to the database.\"\"\"\n        self.db.add(PrimaryDate(**entry))\n        self.db.commit()\n\n\n\nDatesParser()\n","repo_name":"eicksl/PAC-Free-Searcher","sub_path":"dates.py","file_name":"dates.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"17989807564","text":"# Persians: Sydney Anderson, Tram Doan, Devon Knudsen, Zackary Phillips, Promyse Ward, James Wilson\r\n# GitHub Repo: https://github.com/devonknudsen/Vigenere-Cipher\r\n# Written in Python 3.7\r\n\r\nimport sys\r\nimport enum\r\nimport string\r\n\r\n# encrypts plain text to cipher text\r\ndef Cipher(plainText, key):\r\n    P = list(plainText)\r\n    K = list(key)\r\n    C = [] * len(plainText)\r\n    x = 0\r\n    \r\n    # iterate through each index in the plain text\r\n    for i in range(0, len(P)):\r\n        \r\n        # x is used to track and repeat key\r\n        # when x reaches the key's length, it resets\r\n        if (x == len(K)):\r\n            x = 0\r\n        \r\n        # checks if the letter of the plain text is an alphabetic character\r\n        # if true: find and add integer value\r\n        if (P[i].isalpha()):\r\n            Pi = ASCIIValue(P[i])\r\n            Ki = ASCIIValue(K[x])\r\n            c_int = (Pi + Ki)%26\r\n            x += 1\r\n        \r\n        # else: add symbol/numerical value\r\n        else:\r\n            c_int = P[i]\r\n\r\n        C.append(c_int)\r\n\r\n    return C\r\n    \r\n# decrypts cipher text to plain text\r\ndef Decipher(cipherText, key):\r\n    C = list(cipherText)\r\n    K = list(key)\r\n    P = [] * len(cipherText)\r\n    x = 0\r\n\r\n    # iterate through each index in the cipher text\r\n    for i in range(0, len(C)):\r\n        \r\n        # x is used to track and repeat key\r\n        # when x reaches the key's length, it resets\r\n        if (x == len(K)):\r\n            x = 0\r\n        \r\n        # checks if the letter of the cipher text is an alphabetic character\r\n        # if true: find and add 
integer value \r\n        if (C[i].isalpha()):\r\n            Ci = ASCIIValue(C[i])\r\n            Ki = ASCIIValue(K[x])\r\n            p_int = (Ci - Ki + 26)%26\r\n            x += 1\r\n        \r\n        # else: add symbol/numerical value\r\n        else:\r\n            p_int = C[i]\r\n\r\n        P.append(p_int)\r\n\r\n    return P\r\n\r\n# returns the number value of a letter\r\ndef ASCIIValue(letter):\r\n\r\n    # if the letter is lowercase, get lowercase alphabet\r\n    if letter.islower():\r\n        alphabet = list(string.ascii_lowercase)\r\n    \r\n    # else, get uppercase alphabet\r\n    else:\r\n        alphabet = list(string.ascii_uppercase)\r\n    \r\n    # find letter in alphabet\r\n    for i in range(len(alphabet)):\r\n        if (alphabet[i] == letter):\r\n            return i\r\n    \r\n# returns the letter value of a number\r\ndef CharValue(num, case):\r\n\r\n    # if the letter was once uppercase, get lowercase alphabet\r\n    if case == True:\r\n        alphabet = list(string.ascii_lowercase)\r\n    \r\n    # else, get uppercase alphabet\r\n    else:\r\n        alphabet = list(string.ascii_uppercase)\r\n    \r\n    # return letter in alphabet\r\n    return alphabet[num]\r\n\r\n\r\n# MAIN CODE #\r\n\r\n# remove spaces from key (can also be done in cipher/decipher functions)\r\nkey = sys.argv[2]\r\nkey = key.replace(\" \", \"\")\r\n\r\n# if want to encrypt a plain text\r\nif (sys.argv[1] == \"-e\"):\r\n    \r\n    while(True):\r\n\r\n        # used try, except to prevent keyboardInterruption error thrown\r\n        try:\r\n            cipherText = \"\"\r\n            plainText = input()\r\n            plainTextList = list(plainText)\r\n            \r\n            # encipher plain text into a list of integers\r\n            intList = Cipher(plainText, key)\r\n            \r\n            \r\n            # convert each integer into it's mapped letter of the alphabet\r\n            for i in range(0, len(intList)):\r\n                \r\n                # check if the object is an integer\r\n                # if so, convert to letter\r\n                if(isinstance(intList[i], int)):\r\n                    cipherText += str(CharValue(intList[i], plainTextList[i].islower()))\r\n                \r\n                # else, add symbol/numerical value\r\n                else: \r\n                    cipherText += intList[i]\r\n\r\n            print(cipherText)\r\n            \r\n        except KeyboardInterrupt:\r\n            # this will exit if the user does ^d, or ^z, ^c, or whatever their system's exit is\r\n            sys.exit(1)\r\n\r\n        # this will catch the EOF error (EOF - End of file)\r\n        # we're getting this error because we're not using stdin \r\n        except EOFError:\r\n            break\r\n\r\n    \r\n# if want to decrypt a cipher text    \r\nelif (sys.argv[1] == \"-d\"):\r\n    while(True):\r\n        # used try, except to prevent keyboardInterruption error thrown\r\n        try:\r\n            plainText = \"\"\r\n            cipherText = input()\r\n            plainTextList = list(cipherText)\r\n            \r\n            # decipher cipher text into a list of integers\r\n            intList = Decipher(cipherText, key)\r\n            \r\n            # convert each integer into it's mapped letter of the alphabet\r\n            for i in range(0, len(intList)):\r\n                \r\n                # check if the object is an integer\r\n                # if so, convert to letter\r\n                if(isinstance(intList[i], int)):\r\n                    plainText += str(CharValue(intList[i], plainTextList[i].islower()))\r\n                \r\n                # else, add symbol/numerical value\r\n                else:\r\n                    plainText += intList[i]\r\n            print(plainText)\r\n\r\n        except KeyboardInterrupt:\r\n            # this will exit if the user does ^d, or ^z, ^c, or whatever their system's exit is\r\n            sys.exit(1)\r\n\r\n        # this will catch the EOF error (EOF - End of file)\r\n        # we're getting this error because we're not using stdin \r\n        except EOFError:\r\n            break\r\n","repo_name":"devonknudsen/Vigenere-Cipher","sub_path":"Vigenere.py","file_name":"Vigenere.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"8985196578","text":"from django.urls import path\n\nfrom django.contrib.auth.views import (\n LogoutView,\n LoginView,\n)\n\napp_name = 'users'\n\nurlpatterns = [\n path('logout/',\n LogoutView.as_view(\n template_name='users/logged_out.html',\n extra_context={'title': 'Вы вышли из своей учётной записи'}\n ),\n name='logout'),\n path(\n 'login/',\n LoginView.as_view(\n template_name='users/login.html',\n extra_context={'title': 'Авторизация'}\n ),\n name='login'),\n]\n","repo_name":"draft-pick/sector","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"40971604477","text":"import unittest\n\nfrom app import app\n\n\nclass TestPost(unittest.TestCase):\n def test_post(self):\n\n self.test_app = app.test_client()\n\n response = self.test_app.get('/', content_type='html/text')\n self.assertEqual(response.status_code, 200)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"p0bailey/docker-flask","sub_path":"app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"77"} +{"seq_id":"43709844317","text":"# Write a Python program to find the list of words that\r\n# are longer than n from a given list of words\r\n\r\ntxt = \"The quick brown fox jumps over the lazy dog\"\r\nx = txt.split()\r\nb = []\r\nfor i in x:\r\n if len(i) > 3:\r\n b.append(i)\r\nprint(b)","repo_name":"mumbikernikhil/Python-Imp-Codes","sub_path":"Python Programs/List/17. print words longer than 3.py","file_name":"17. print words longer than 3.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"29311759249","text":"from random import randint\nnr = randint(1,20)\ni = 0\nwhile i < 5:\n perdiste = True\n n = int(input(\"Ingresa el numero: \"))\n if n > nr:\n print(\"Mi numero es mayor\")\n i += 1\n elif n < nr:\n print(\"Mi numero es menor\")\n i += 1\n elif n == nr:\n print(\"Adivinaste, mi numero era \", nr)\n perdiste = False\n break\nif perdiste:\n print(\"No adivinaste, mi numero era \", nr)","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej12/hito1_ej12_bbeac069d5a6da8d2f03ac31eb1b4d49.py","file_name":"hito1_ej12_bbeac069d5a6da8d2f03ac31eb1b4d49.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"6581229658","text":"\"\"\"learning URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom app.views import HomePageView, HowToView, TestView, TestRightView, TestWrongView, TestRandomView, generate_simple_view, generate_odnorodn_view, HowTo2View, generate_quadratic_view, HowTo3View\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', HomePageView.as_view(), name='home'),\n    path('howto', HowToView.as_view(), name='howto'),\n    path('howto2', HowTo2View.as_view(), name='howto2'),\n    path('howto3', HowTo3View.as_view(), name='howto3'),\n    path('test/', TestView.as_view(), name='test'),\n    path('test//right', TestRightView.as_view(), name='right'),\n    path('test//wrong', TestWrongView.as_view(), name='wrong'),\n    path('test/random', TestRandomView.as_view(), name='random'),\n    path('generate-simple', generate_simple_view, name='generate_simple'),\n    path('generate-odnorodn', generate_odnorodn_view, name='generate_odnorodn'),\n    path('generate-quadratic', generate_quadratic_view, name='generate_quadratic'),\n]\n","repo_name":"IlyaHubnester/learning-trig","sub_path":"learning/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"75176949369","text":"\r\ndef palindrome(m): # define the function\r\n    i=0 # define a counter\r\n    longueur = len(m) # define a variable holding the length of the word\r\n\r\n    while i < longueur: # while loop to compare the letters of the word\r\n        if m[i] != m[-i - 1]: # if the letter at index i differs from the opposite letter, return False\r\n            return False\r\n        i += 1 # increment the counter\r\n    return True # if the while loop completes, the function returns True\r\n\r\nmot=input('entrer un mot : ') # ask the user for a word\r\nif palindrome(mot): # test the word with the palindrome function defined above\r\n    print(\"Votre mot est un palindrome.\") # if the function returns True, report that the word is a palindrome\r\nelse: # otherwise report that the word is not a palindrome\r\n    print(\"Votre mot n'est pas palindrome.\")","repo_name":"Theo-Remy/pythonexe","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"5831063972","text":"from menu import MENU\n\n# Start of Function section\n\n\ndef userChoice():\n    \"\"\"This is a function which checks what the user entered into the machine.\n    And returns the input string.\"\"\"\n\n    UserChoice = input(\"What would you like? (espresso/latte/cappuccino): \")\n\n    while (UserChoice != \"espresso\") and (UserChoice != \"latte\") and (UserChoice != \"cappuccino\") and (UserChoice != \"report\") and (UserChoice != \"off\"):\n        UserChoice = input(\n            \"What would you like? 
(espresso/latte/cappuccino): \")\n\n    return UserChoice\n\n\ndef coinChecker(Coin, TypeOFCoin):\n    \"\"\"This function is a support function to userCoins() function,\n    which checks if the input of the user is valid for the system.\n    And returns the amount of the coin.\"\"\"\n\n    Coin = input(f\"How many {TypeOFCoin}?: \")\n\n    while not Coin.isdigit():\n        Coin = input(f\"How many {TypeOFCoin}?: \")\n\n    return int(Coin)\n\n\ndef userCoins():\n    \"\"\"This function returns a tuple of the amount of Coins that the user inserted into\n    the machine.\"\"\"\n    Quarters = 0\n    Dimes = 0\n    Nickles = 0\n    Pennies = 0\n    print(\"Please insert coins.\")\n    Quarters = coinChecker(Quarters, \"Quarters\")\n    Dimes = coinChecker(Dimes, \"Dimes\")\n    Nickles = coinChecker(Nickles, \"Nickles\")\n    Pennies = coinChecker(Pennies, \"Pennies\")\n\n    return Quarters, Dimes, Nickles, Pennies\n\n\ndef suppliesCheck(MENU, UserChoice, Water, Milk, Coffee):\n    \"\"\"This function checks if the machine has a sufficient amount of each of the\n    supplies and returns False when it finds that a supply is not sufficient for the current\n    picked item.\"\"\"\n\n    # map each ingredient name to the matching machine resource\n    resources = {\"water\": Water, \"milk\": Milk, \"coffee\": Coffee}\n    for ingredient in MENU[UserChoice][\"ingredients\"]:\n        if ((resources.get(ingredient, 0) - (MENU[UserChoice]['ingredients'][ingredient])) < 0):\n            print(f\"Sorry there is not enough {ingredient}\")\n            return False\n\n    return True\n\n\ndef coffeeMaking(UserChoice, Menu, Water, Milk, Coffee):\n    \"\"\"This function creates the coffee and removes the amount of each resource\n    depending on what the user chose\"\"\"\n\n    for ingredient in Menu[UserChoice][\"ingredients\"]:\n        if ingredient == \"water\":\n            Water = Water - Menu[UserChoice][\"ingredients\"][\"water\"]\n        elif ingredient == \"milk\":\n            Milk = Milk - Menu[UserChoice][\"ingredients\"][\"milk\"]\n        else:\n            Coffee = Coffee - Menu[UserChoice][\"ingredients\"][\"coffee\"]\n\n    return Water, Milk, Coffee\n\n\ndef moneyChecking(Money, UserCoins, UserChoice, CoinValues, MENU):\n    \"\"\"This function checks if the user provided a sufficient amount of\n    money to pay\"\"\"\n    UserChange = 0\n    UserMoney = 0\n\n    # pair each coin count with its own value (the tuple from userCoins() is in\n    # the same order as the CoinValues dict)\n    for coin, coinValue in zip(UserCoins, CoinValues.values()):\n        UserMoney = UserMoney + (coin * coinValue)\n\n    UserChange = change(UserMoney, MENU, UserChoice)\n\n    if UserChange == False:\n        return False\n    else:\n        if UserChange == 0:\n            print(f\"Here is your {UserChoice} ☕ Enjoy!\")\n            return Money + MENU[UserChoice][\"cost\"]\n        else:\n            print(\n                f\"Here is ${UserChange} in change\\nHere is your {UserChoice} ☕ Enjoy!\")\n            return Money + MENU[UserChoice][\"cost\"]\n\n\ndef change(UserMoney, MENU, UserChoice):\n    \"\"\"This function checks if the user needs any change back\"\"\"\n\n    UserChange = 0\n    for coffeeType in MENU:\n        if (coffeeType == UserChoice):\n            if ((MENU[coffeeType][\"cost\"]) < UserMoney):\n                UserChange = UserMoney - (MENU[coffeeType][\"cost\"])\n                return UserChange\n            elif ((MENU[coffeeType][\"cost\"]) > UserMoney):\n                return False\n            else:\n                return 0\n\n\ndef adminOutput(Water, Milk, Coffee, Money):\n\n    print(\n        f\"Water: {Water}ml\\nMilk: {Milk}ml\\nCoffee: {Coffee}g\\nMoney: ${Money}\")\n\n\ndef adminCheck(AdminValues):\n    \"\"\"This function checks the admin credentials\"\"\"\n\n    print(\"You have two chances\")\n    Username = input(\"Username: \")\n    Password = input(\"Password: \")\n\n    
for value in AdminValues:\n if (AdminValues[value] == Username) and (AdminValues[value] == Password):\n print(f\"{value} Ok\")\n else:\n return False\n return True\n\n\n # End of Function section\nWater = 600\nMilk = 600\nCoffee = 600\nMachineMoney = 0\nMachineWorks = True\n\n# Coins\nCoinValues = {\"Quarters\": 0.25,\n \"Dimes\": 0.10,\n \"Nickles\": 0.15,\n \"Pennies\": 0.25}\n\n# Admin Values\nAdmin = {\"Username\": \"admin\",\n \"Password\": \"admin\",\n }\n\nwhile MachineWorks:\n UserChoice = userChoice()\n\n if (UserChoice == \"off\") or (UserChoice == \"report\"):\n if (adminCheck(Admin) == True):\n\n if (UserChoice == \"report\"):\n adminOutput(Water, Milk, Coffee, MachineMoney)\n elif (UserChoice == \"off\"):\n MachineWorks = False\n else:\n # espresso / latte / cappuccino\n if (suppliesCheck(MENU, UserChoice, Water, Milk, Coffee) == True):\n\n CheckMoney = moneyChecking(\n MachineMoney, userCoins(), UserChoice, CoinValues, MENU)\n if CheckMoney == False:\n print(\"Not enough money\")\n continue\n else:\n Water = coffeeMaking(UserChoice, MENU, Water, Milk, Coffee)[0]\n Milk = coffeeMaking(UserChoice, MENU, Water, Milk, Coffee)[1]\n Coffee = coffeeMaking(UserChoice, MENU, Water, Milk, Coffee)[2]\n\n MachineMoney = MachineMoney + float(CheckMoney)\n","repo_name":"sifisKoen/Pi-Playground","sub_path":"CoffeeMachine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"73050079929","text":"#!/usr/bin/env python\nimport os\nimport sys\n\n\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nimport nltk\nnltk.download('punkt')\nimport time\nimport pandas as pd\nimport pickle\n\n\n# In[101]:\n\n\n#make torch\ndef make_variables(sentences, label,vocabulary):\n \n final_sentences = []\n \n #tokenizing\n for index, sentence in enumerate(sentences):\n final_sentences.append(nltk.word_tokenize(sentence))\n \n #indexing\n for index, sentence in enumerate(final_sentences):\n final_sentences[index]=[vocabulary[word] for word in sentence]\n\n #각자의 seq_length 구하기 (미니배치별로 진행)\n seq_lengths = []\n for sentence in final_sentences:\n seq_lengths.append(len(sentence))\n seq_lengths = torch.LongTensor(seq_lengths)\n\n# print(\"패딩전\")\n# print(final_sentences[:5])\n# print(seq_lengths[:5])\n# print(label[:5])\n return padding_tensor_sorting(final_sentences,seq_lengths,label)\n\n\n# In[102]:\n\n\ndef padding_tensor_sorting(sentences, seq_lengths, label):\n seq_tensor = torch.zeros((len(sentences), seq_lengths.max())).long()\n print('seq_tensor(max):' , seq_lengths.max())\n for idx, (seq, seq_len) in enumerate(zip(sentences, seq_lengths)):\n seq_tensor[idx, :seq_len] = torch.LongTensor(seq)\n \n if len(seq_tensor) !=1:\n seq_lengths, perm_idx = seq_lengths.sort(0,descending=True)\n seq_tensor = seq_tensor[perm_idx]\n \n \n target = torch.tensor(label, dtype=torch.long)\n \n if len(seq_tensor) !=1 and len(target)!=0:\n target = target[perm_idx]\n \n# print(\"패딩후\")\n# print(seq_tensor[:5])\n# print(seq_lengths[:5])\n# print(target[:5])\n return create_variable(seq_tensor), create_variable(seq_lengths), create_variable(target)\n\n\n# In[103]:\n\n\ndef create_variable(tensor):\n #tensor를 gpu 이용 가능한지\n if torch.cuda.is_available():\n return Variable(tensor.cuda())\n else:\n return Variable(tensor)\n\n\n# 
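Editor's note on the coffee-machine record that ends above: it carries several genuine bugs. CoinValues prices a nickel at 0.15 and a penny at 0.25 (US values are 0.05 and 0.01); moneyChecking multiplies every coin count by every coin value in a nested loop, inflating the total; adminCheck compares the same dictionary entry to both the username and the password, which only happens to pass because both credentials are "admin"; and suppliesCheck tests each required ingredient against all three resources instead of its matching one, so a drink can be rejected over a resource it does not even use. A minimal corrected sketch follows; the function names and the resources dict are mine, not the record's, and it assumes the usual MENU layout for this exercise, MENU[drink] = {"ingredients": {...}, "cost": ...}:

# Sketch of the fixes, not the author's code.
CORRECT_COIN_VALUES = {"Quarters": 0.25, "Dimes": 0.10,
                       "Nickles": 0.05, "Pennies": 0.01}

def supplies_check(menu, choice, resources):
    """Compare each required ingredient against its *matching* resource."""
    for ingredient, amount in menu[choice]["ingredients"].items():
        if resources.get(ingredient, 0) < amount:
            print(f"Sorry there is not enough {ingredient}")
            return False
    return True

def total_inserted(user_coins, coin_values=CORRECT_COIN_VALUES):
    """Sum each coin count times its own value; no nested loop.

    Relies on user_coins arriving in the same (Quarters, Dimes,
    Nickles, Pennies) order that userCoins() returns.
    """
    return sum(count * value
               for count, value in zip(user_coins, coin_values.values()))

def admin_check(admin, username, password):
    """Match each credential against its own stored field."""
    return username == admin["Username"] and password == admin["Password"]

For example, total_inserted((1, 2, 0, 3)) gives 0.48, whereas the record's nested loop reports 4.50 for the same six coins (every coin count times 0.75, the sum of its own coin table).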
In[104]:\n\n\ndef pre_process_test(sentence,a1,a2,a3,a4):\n\n raw_data = []\n sentences = []\n \n temp =[]\n temp.append(sentence)\n temp.append(a1)\n temp.append(a2)\n temp.append(a3)\n temp.append(a4)\n raw_data.append(temp)\n \n print(\"총 문제 개수: \", len(raw_data))\n print(\"총 문장 개수: \", len(raw_data)*4)\n for row_index, row in enumerate(raw_data):\n row_sentence = []\n row_label = []\n hype1 = 0\n hype2 = 0\n for item_index, item in enumerate(row):\n # .뒤에 나오는 것 다 없애기 .이 여러 개 있을지 모르니 마지막 .을 이용하기. 두 문장인 경우 .과 ?과 !이 존재\n # 특수문자 앞에 공백으로 하기\n # 공백 없애기\n # 공백 두개, 세개 -> 한개로 바꾸기\n # _____,----- 연달아 있을시 index 찾아 양쪽 공백 만들기\n # 소문자로 바꾸기\n\n if item_index == 0:\n #\n index_list = []\n index_of_dot = item.rfind('.')\n index_of_question = item.rfind('?')\n index_of_surprise = item.rfind('!')\n index_list.append(index_of_dot)\n index_list.append(index_of_question)\n index_list.append(index_of_surprise)\n standard = max(index_list)+1\n if standard >0:\n item = item[:standard] \n\n #\n item = item.strip()\n #\n item = item.replace(\" \", \" \")\n item = item.replace(\" \", \" \")\n\n #\n index_of_hype1 = item.find('__')\n index_of_hype2 = item.find('--')\n index_of_hype3 = item.rfind('__')\n index_of_hype4 = item.rfind('--')\n if index_of_hype1 > index_of_hype2:\n if index_of_hype3 > index_of_hype4:\n hype1 = index_of_hype1\n hype2 = index_of_hype3+1\n elif index_of_hype2 > index_of_hype1:\n if index_of_hype4 > index_of_hype3:\n hype1 = index_of_hype2\n hype2 = index_of_hype4+1\n\n #----- => hype1 = 0 , hype2 = 4\n if hype1 == 0:\n if (hype2+1) < len(item):\n if item[hype2+1] != ' ':\n item = item[:hype2+1] + ' ' + item[hype2+1:]\n else:\n if item[hype1-1] != ' ':\n item = item[:hype1] + ' ' + item[hype1:]\n hype1 = hype1+1\n hype2 = hype2+1\n if (hype2+1) < len(item):\n if item[hype2+1] != ' ':\n item = item[:hype2+1] + ' ' + item[hype2+1:]\n #\n item = item.lower()\n # 문장이 아닐 때\n else:\n item = item.strip()\n raw_data[row_index][item_index] = item\n #sentence 4개 만들기\n row[0] = row[0].replace(row[0][hype1:hype2+1],\"\")\n for i in range(1,5):\n sentence = row[0][:hype1] + row[i] + row[0][hype1:]\n row_sentence.append(sentence)\n\n sentences.append(row_sentence)\n\n sentences= np.array(sentences)\n sentences= sentences.flatten()\n sentences = sentences.tolist()\n \n return sentences\n\nclass myModel(nn.Module):\n def __init__(self, n_layers, hidden_dim, n_vocab, embed_dim, n_classes, bidirectional=True, dropout_p=0.2):\n super(myModel, self).__init__()\n\n self.n_layers = n_layers\n self.hidden_dim = hidden_dim\n self.n_vocab = n_vocab\n self.embed_dim = embed_dim\n self.n_classes = n_classes\n self.n_directions = int(bidirectional) + 1\n\n self.embed = nn.Embedding(self.n_vocab, self.embed_dim)\n self.dropout = nn.Dropout(dropout_p)\n # self.lstm = nn.LSTM(self.embed_dim, self.hidden_dim,\n # num_layers=self.n_layers,\n # dropout=dropout_p,\n # batch_first=True)\n self.lstm = nn.GRU(self.embed_dim, self.hidden_dim,\n self.n_layers,\n dropout=dropout_p,\n batch_first=True)\n # self.relu = nn.ReLU()\n self.out = nn.Linear(self.hidden_dim, self.n_classes)\n\n # self.fc1 = nn.Linear(self.hidden_dim, 50)\n # self.fc2 = nn.Linear(50, self.n_classes)\n def forward(self, x, seq_lengths):\n x = x.t()\n sen_len = x.size(0)\n batch_size = x.size(1)\n print(self.n_layers, self.hidden_dim, self.n_vocab, self.embed_dim, self.n_classes, self.n_directions)\n\n print(x)\n embedded = self.embed(x)\n\n # print(embedded)\n lstm_input = pack_padded_sequence(embedded, seq_lengths.data.cpu().numpy())\n self.hidden = 
self._init_hidden(batch_size)\n self.lstm.flatten_parameters()\n\n lstm_out, self.hidden = self.lstm(lstm_input)\n lstm_out, lengths = pad_packed_sequence(lstm_out)\n\n # h_t = self.dropout(self.hidden[-1])\n # logit = self.out(h_t[-1])\n logit = self.out(self.hidden[-1])\n # print(logit)\n return logit\n\n def _init_hidden(self, batch_size):\n hidden = torch.zeros((self.n_layers, self.n_directions,\n batch_size, self.hidden_dim))\n return create_variable(hidden)\n def change(vocabsize):\n self.n_vocab = vocabsize\n self.embed = nn.Embedding(self.n_vocab,self.embed_dim)\n\n\nif __name__ == '__main__':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Micro.settings')\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n","repo_name":"Hwan-seok/Toeic_helper","sub_path":"Micro_Server_django/Micro/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":8431,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"} +{"seq_id":"22616779528","text":"import os\nfrom setuptools import setup, Extension\nfrom bard import __version__\n\n\nBOOST_PYTHON_LIB = os.getenv('BOOST_PYTHON_LIB', 'boost_python-py3')\n\nextra_compile_args = ['-std=gnu++17', '-fopenmp', '-Ofast', '-mtune=native',\n '-funroll-loops']\nmachine = os.uname().machine\nif machine not in ['ppc64', 'ppc64le']:\n extra_compile_args.append('-march=native')\n\nbard_ext = Extension('bard.bard_ext',\n define_macros=[('MAJOR_VERSION', '1'),\n ('MINOR_VERSION', '0'),\n ('_GLIBCXX_PARALLEL', None)],\n include_dirs=['/usr/include/boost'],\n libraries=[BOOST_PYTHON_LIB, 'gomp'],\n sources=['bard/bard_ext.cpp'],\n extra_compile_args=extra_compile_args + ['-fopenmp'])\n\n\nbard_audiofile = Extension('bard.bard_audiofile',\n define_macros=[('MAJOR_VERSION', '1'),\n ('MINOR_VERSION', '0'),\n ('_GLIBCXX_PARALLEL', None)],\n include_dirs=['/usr/include/boost',\n '/usr/include/ffmpeg'],\n libraries=[BOOST_PYTHON_LIB, 'avcodec',\n 'avformat', 'avutil', 'swresample'],\n sources=['bard/audiofile/audiofile.cpp',\n 'bard/audiofile/bufferaviocontext.cpp',\n 'bard/audiofile/bufferdecodeoutput.cpp',\n 'bard/audiofile/decodeoutput.cpp',\n 'bard/audiofile/filedecodeoutput.cpp',\n 'bard/audiofile/referencedata.cpp',\n 'bard/audiofile/pyaudiofile.cpp'],\n extra_compile_args=extra_compile_args)\nsetup(\n name=\"bard\",\n version=__version__,\n author=\"Antonio Larrosa\",\n author_email=\"larrosa@kde.org\",\n packages=[\"bard\"],\n include_package_data=True,\n url=\"https://github.com/antlarr/bard\",\n description=\"Bard Music Manager - A database to manage your music, \"\n \"find duplicates and fix tags\",\n long_description=\"Bard is a music manager that uses a database to store \"\n \"all information about your music: location, tags, \"\n \"properties, audio analysis, etc. 
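Editor's note on the manage.py record that ends above: pre_process_test calls np.array but the file never imports numpy, so it raises NameError at runtime; _init_hidden allocates a 4-D tensor where PyTorch GRU/LSTM layers expect a 3-D hidden state of shape (num_layers * num_directions, batch, hidden_size); and change omits self from its signature. The 4-D hidden only goes unnoticed because forward immediately overwrites self.hidden with the GRU's own return value, and note that n_directions can be 2 even though the GRU is built without bidirectional=True. A hedged sketch of drop-in replacements for the two methods, not the author's code:

import numpy as np   # pre_process_test() uses np.array but the record never imports it
import torch
import torch.nn as nn

def _init_hidden(self, batch_size):
    # PyTorch RNN hidden state is 3-D: (num_layers * num_directions, batch, hidden)
    hidden = torch.zeros(self.n_layers * self.n_directions,
                         batch_size, self.hidden_dim)
    return create_variable(hidden)   # create_variable() as defined earlier in the record

def change(self, vocab_size):        # the original signature drops `self`
    self.n_vocab = vocab_size
    self.embed = nn.Embedding(self.n_vocab, self.embed_dim)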
Bard can also find \"\n \"audio duplicates (not using tags, but the audio itself) \"\n \"and run different kind of queries on the database.\",\n python_requires=\">=3.5\",\n install_requires=[\n \"pyacoustid\",\n \"mutagen\",\n \"Pillow\",\n \"pydub\",\n \"numpy\",\n \"dbus-python\",\n \"SQLAlchemy >= 2.0\",\n \"SQLAlchemy-Utils\",\n \"alembic\",\n \"Werkzeug\",\n \"Flask\",\n \"Flask-Cors\",\n \"Flask-Login\",\n \"Jinja2\",\n \"bcrypt\",\n \"paramiko\",\n \"importlib_resources; python_version < '3.7'\"\n ],\n data_files=[('share/doc/packages/bard/',\n ['README.md'])],\n scripts=[\"scripts/bard\"],\n license=\"GPL-3.0-only AND MIT\",\n ext_modules=[bard_ext, bard_audiofile],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n ],\n)\n","repo_name":"antlarr/bard","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"77"} +{"seq_id":"17934180930","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.http import HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.defaultfilters import slugify\n\nfrom endless_pagination.decorators import page_template\nfrom unidecode import unidecode\n\nfrom .forms import TextForm\nfrom .forms import PhotoForm\nfrom .forms import VideoForm\nfrom .helper import create_tags\nfrom .helper import media_wall\nfrom post.models import Post\nfrom post.models import Follow\nfrom post.models import Tag\n\n\n@login_required(login_url=reverse_lazy('login'))\n@page_template('dashboard/post_info.html')\ndef dashboard(request, posts_filter=None, template='dashboard/index.html',\n extra_context=None):\n user = request.user\n if posts_filter == 'mine':\n posts = Post.objects.filter(author=user)\n elif posts_filter == 'likes':\n posts = Post.objects.filter(like__author=user)\n elif posts_filter == 'following':\n follows = Follow.objects.filter(follower=user)\n followings = (follow.following for follow in follows)\n posts = Post.objects.filter(author__in=followings)\n else:\n follows = Follow.objects.filter(follower=user)\n followings = (follow.following for follow in follows)\n posts = Post.objects.filter(Q(author=user) | Q(author__in=followings))\n\n if posts:\n posts = posts.order_by('-created_at')\n\n context = {\n 'posts': posts,\n 'filter': posts_filter,\n }\n if extra_context:\n context.update(extra_context)\n return render_to_response(template, context,\n context_instance=RequestContext(request))\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef new_text(request, post_id=None, template_name='dashboard/new.html'):\n if request.method == 'POST':\n user = request.user\n form = TextForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data['title']\n content = form.cleaned_data['content']\n tags = form.cleaned_data['tags']\n post_id = form.cleaned_data['post_id']\n\n tags = create_tags(tags)\n if post_id:\n try:\n post = Post.objects.filter(pk=post_id)\n post.update(title=title, content=content)\n post = post[0]\n post.tags.remove(*post.tags.all())\n post.tags.add(*tags)\n except ObjectDoesNotExist:\n return 
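One portability remark on the setup.py record above: os.uname() exists only on POSIX, so the machine check would raise AttributeError on Windows, while platform.machine() expresses the same ppc64/ppc64le guard portably. The project plainly targets Linux (Boost and ffmpeg include paths), so this is only a sketch of the portable variant, not a needed fix:

import platform

extra_compile_args = ['-std=gnu++17', '-fopenmp', '-Ofast', '-mtune=native',
                      '-funroll-loops']
# Same guard as the record: skip -march=native on PowerPC,
# where GCC takes -mcpu rather than -march.
if platform.machine() not in ('ppc64', 'ppc64le'):
    extra_compile_args.append('-march=native')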
HttpResponseRedirect(reverse_lazy('dashboard'))\n else:\n post = Post.objects.create(author=user, title=title,\n content=content, kind='T')\n post.tags.add(*tags)\n\n user_slug = user.get_profile().slug\n if post.slug:\n redirect_link = reverse_lazy('post_detail_slug',\n kwargs={\n 'user_slug': user_slug,\n 'post_id': post.id,\n 'post_slug': post.slug,\n })\n else:\n redirect_link = reverse_lazy('post_detail',\n kwargs={\n 'user_slug': user_slug,\n 'post_id': post.id,\n })\n return HttpResponseRedirect(redirect_link)\n else:\n if post_id:\n try:\n post = Post.objects.get(pk=post_id)\n initial = {\n 'title': post.title,\n 'content': post.content,\n 'tags': ', '.join([tag.name for tag in post.tags.all()]),\n 'post_id': post_id,\n }\n form = TextForm(initial=initial)\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse_lazy('dashboard'))\n else:\n form = TextForm()\n context = {\n 'form': form,\n 'kind': 'text',\n 'action_url': reverse_lazy('new_text'),\n }\n return render_to_response(template_name, context,\n context_instance=RequestContext(request))\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef new_photo(request, post_id=None, template_name='dashboard/new.html'):\n if request.method == 'POST':\n user = request.user\n form = PhotoForm(request.POST)\n if form.is_valid():\n photo = form.cleaned_data['photo']\n content = form.cleaned_data['content']\n url = form.cleaned_data['url']\n tags = form.cleaned_data['tags']\n post_id = form.cleaned_data['post_id']\n\n tags = create_tags(tags)\n if post_id:\n try:\n post = Post.objects.filter(pk=post_id)\n post.update(photo=photo, content=content, link=url)\n post = post[0]\n post.tags.remove(*post.tags.all())\n post.tags.add(*tags)\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse_lazy('dashboard'))\n else:\n post = Post.objects.create(author=user, photo=photo, link=url,\n content=content, kind='P')\n post.tags.add(*tags)\n\n user_slug = user.get_profile().slug\n if post.slug:\n redirect_link = reverse_lazy('post_detail_slug',\n kwargs={\n 'user_slug': user_slug,\n 'post_id': post.id,\n 'post_slug': post.slug,\n })\n else:\n redirect_link = reverse_lazy('post_detail',\n kwargs={\n 'user_slug': user_slug,\n 'post_id': post.id,\n })\n return HttpResponseRedirect(redirect_link)\n else:\n if post_id:\n try:\n post = Post.objects.get(pk=post_id)\n initial = {\n 'photo': post.photo,\n 'content': post.content,\n 'url': post.link,\n 'tags': ', '.join([tag.name for tag in post.tags.all()]),\n 'post_id': post_id,\n }\n form = PhotoForm(initial=initial)\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse_lazy('dashboard'))\n else:\n form = PhotoForm()\n\n context = {\n 'form': form,\n 'kind': 'photo',\n 'action_url': reverse_lazy('new_photo'),\n }\n return render_to_response(template_name, context,\n context_instance=RequestContext(request))\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef new_video(request, post_id=None, template_name='dashboard/new.html'):\n if request.method == 'POST':\n user = request.user\n form = VideoForm(request.POST)\n if form.is_valid():\n video = form.cleaned_data['video']\n content = form.cleaned_data['content']\n tags = form.cleaned_data['tags']\n post_id = form.cleaned_data['post_id']\n\n tags = create_tags(tags)\n if post_id:\n try:\n post = Post.objects.filter(pk=post_id)\n post.update(video=video, content=content)\n post = post[0]\n post.tags.remove(*post.tags.all())\n post.tags.add(*tags)\n except ObjectDoesNotExist:\n return 
HttpResponseRedirect(reverse_lazy('dashboard'))\n else:\n post = Post.objects.create(author=user, video=video,\n content=content, kind='V')\n post.tags.add(*tags)\n\n user_slug = user.get_profile().slug\n if post.slug:\n redirect_link = reverse_lazy('post_detail_slug',\n kwargs={\n 'user_slug': user_slug,\n 'post_id': post.id,\n 'post_slug': post.slug,\n })\n else:\n redirect_link = reverse_lazy('post_detail',\n kwargs={\n 'user_slug': user_slug,\n 'post_id': post.id,\n })\n return HttpResponseRedirect(redirect_link)\n else:\n if post_id:\n try:\n post = Post.objects.get(pk=post_id)\n initial = {\n 'video': post.video,\n 'content': post.content,\n 'tags': ', '.join([tag.name for tag in post.tags.all()]),\n 'post_id': post_id,\n }\n form = VideoForm(initial=initial)\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse_lazy('dashboard'))\n else:\n form = VideoForm()\n\n context = {\n 'form': form,\n 'kind': 'video',\n 'action_url': reverse_lazy('new_video'),\n }\n return render_to_response(template_name, context,\n context_instance=RequestContext(request))\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef edit_post(request, post_id):\n try:\n post = Post.objects.get(pk=post_id)\n if post.kind == 'T':\n return new_text(request, post_id)\n elif post.kind == 'P':\n return new_photo(request, post_id)\n elif post.kind == 'V':\n return new_video(request, post_id)\n\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse_lazy('dashboard'))\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef delete_post(request, post_id):\n try:\n post = Post.objects.get(pk=post_id, author=request.user)\n post.delete()\n except ObjectDoesNotExist:\n pass\n finally:\n return HttpResponseRedirect(reverse_lazy('dashboard'))\n\n\n@page_template('index/index_page.html')\ndef index(request, keyword=None, template='index/index.html',\n extra_context=None):\n posts_group = list(media_wall(keyword=keyword))\n q = request.GET.get('q')\n if not q:\n try:\n keyword = Tag.objects.get(slug=keyword).name\n except ObjectDoesNotExist:\n pass\n\n context = {\n 'posts_group': posts_group,\n 'keyword': keyword,\n }\n if extra_context:\n context.update(extra_context)\n return render_to_response(template, context,\n context_instance=RequestContext(request))\n\n\ndef search(request):\n keyword = request.GET.get('q')\n tag = slugify(unidecode(keyword))\n if not tag:\n return HttpResponseRedirect('/')\n else:\n return index(request, keyword=keyword)\n\n\n@login_required(login_url=reverse_lazy('login'))\ndef followers(request, template_name='dashboard/followers.html'):\n user = request.user\n follows = Follow.objects.filter(following=user)\n followers = (follow.follower for follow in follows)\n\n context = {\n 'followers': followers,\n }\n return render_to_response(template_name, context,\n context_instance=RequestContext(request))\n","repo_name":"mozillazg/mtum","sub_path":"mtum/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"} +{"seq_id":"4664351749","text":"import requests\nimport random\nimport smtplib\nimport os\nfrom email.message import EmailMessage\n\n\n\ndef getBlogData():\n # https://mocki.io/fake-json-api --> json website\n return requests.get(os.getenv(\"BLOG_END_POINT\")).json()\n\n\ndef randomNumberGenerator():\n return random.randint(1, 10)\n\n\ndef send_mail(name,mail,mobile,msg):\n senderMail = 'thanigaisolutions@gmail.com'\n receipient_mail = 
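The three new_* views in the Django record that ends above repeat the same twenty-line redirect block verbatim. A hedged refactor sketch follows; the helper name redirect_to_post is hypothetical, and the imports mirror the record's own legacy ones (django.core.urlresolvers is pre-Django-2.0):

from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy

def redirect_to_post(user, post):
    """Build the canonical detail URL for a post, slugged when available."""
    kwargs = {'user_slug': user.get_profile().slug, 'post_id': post.id}
    if post.slug:
        kwargs['post_slug'] = post.slug
        return HttpResponseRedirect(reverse_lazy('post_detail_slug', kwargs=kwargs))
    return HttpResponseRedirect(reverse_lazy('post_detail', kwargs=kwargs))

Each view's success branch then collapses to return redirect_to_post(user, post).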
'thanigaifacts@gmail.com'\n\n mailMsg = EmailMessage()\n\n mailMsg['Subject'] = 'Thanigai Facts - New Contact Form!'\n mailMsg['From'] = senderMail\n mailMsg['To'] = receipient_mail\n msgfile = f\"Name : {name}\\nMail : {mail}\\nMobile : {mobile}\\nMessage : {msg}\"\n mailMsg.set_content(msgfile)\n\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(os.getenv(\"MAIL_USER_NAME\"), os.getenv(\"MAIL_PASSWORD\"))\n smtp.send_message(mailMsg)\n\n return True,\"Message Sent Successfully!\"\n\n\n\n\n\n\n\n\n\n","repo_name":"ThanigaiFacts/thanigaifacts","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30338664006","text":"# -*- coding: utf-8 -*-\n# A - スーパーICT高校生\n# https://atcoder.jp/contests/arc022/tasks/arc022_1\n\nS = input()\nans = ''\n\nfor s in S:\n if len(ans) == 0 and (s == 'I' or s == 'i'):\n ans += s\n elif len(ans) == 1 and (s == 'C' or s == 'c'):\n ans += s\n elif len(ans) == 2 and (s == 'T' or s == 't'):\n ans += s\n\nif len(ans) == 3:\n print('YES')\nelse:\n print('NO')\n\n# 10:41 - 10:47(AC)\n","repo_name":"yu5shi8/AtCoder","sub_path":"ARC_A/ARC022A.py","file_name":"ARC022A.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"943472","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport numpy as np\nimport os\nimport json\nimport re\nimport time\n# import datetime\n\nchrome_path = r'C:\\Users\\Roi Shmueli\\Downloads\\chromedriver_win32\\chromedriver.exe'\nbrowser = webdriver.Chrome(chrome_path)\nposts = []\nbrowser.get('https://www.facebook.com/juttoujewelry/')\n\nSCROLL_PAUSE_TIME = 0.5\n\n# Get scroll height\nlast_height = browser.execute_script(\"return document.body.scrollHeight\")\ni = 1\n\nwhile True:\n\t# Scroll down to bottom\n\tbrowser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\t# Wait to load page\n\ttime.sleep(SCROLL_PAUSE_TIME)\n\t# Calculate new scroll height and compare with last scroll height\n\tnew_height = browser.execute_script(\"return document.body.scrollHeight\")\n\t# if new_height == last_height: break\n\tlast_height = new_height\n\ti = i+1\n\tif(i>200):\n\t\tbreak\n\nelement = browser.find_elements_by_class_name('userContentWrapper')\nj=1\nfor i in element:\n time1 = i.find_element_by_class_name('_5ptz')\n time2 = time1.get_attribute(\"title\")\n\n print(time2)\n\n contents = i.find_elements_by_tag_name('p')\n content = '';\n if len(contents) > 0:\n \t# print(len(contents))\n \tfor c in contents:\n \t\tcontent = content + c.text\n\n content = content.replace(\"\\n\", \" \")\n content = content.replace(\"...\", \" \")\n content = content.replace('\"', \"'\")\n try:\n post = {}\n post[\"time\"] = time2\n post[\"number\"] = j\n post[\"content\"] = content\n posts.append(post)\n except AttributeError:\n \tpass\n j+=1\nfilename = \"data1.json\"\nwith open(filename, mode='w', encoding='utf-8') as f:\n json.dump(posts, f, indent=2, ensure_ascii=False)\n\nbrowser.quit()\n","repo_name":"roishmueli1/Juttou","sub_path":"cralwer.py","file_name":"cralwer.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"30172459666","text":"import json\n\nf = open('lol.json')\ndata = json.load(f)\n\ninfo = data['info']\n\nframes = 
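On the Facebook-crawler record above: the height comparison that would end the scroll loop is commented out, so the loop always burns through all 200 iterations even after the page stops growing. (The neighbouring utility.py record sensibly reads its SMTP credentials from the environment; the hardcoded sender and recipient addresses could be moved there too.) A minimal sketch restoring the check while keeping the hard cap as a safety net; the function name is mine:

import time

SCROLL_PAUSE_TIME = 0.5
MAX_SCROLLS = 200

def scroll_to_bottom(browser):
    last_height = browser.execute_script("return document.body.scrollHeight")
    for _ in range(MAX_SCROLLS):
        browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(SCROLL_PAUSE_TIME)
        new_height = browser.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break            # page stopped growing; no point scrolling further
        last_height = new_height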
data[\"info\"][\"frames\"]\n\np_frames = []\n\n# set the counter\nCHAMPION_KILL = 0\nCHAMPION_SPECIAL_KILL = 0\nELITE_MONSTER_KILL = 0\nBUILDING_KILL = 0\n\nCHAMPION_KILL_INFO = []\nCHAMPION_SPECIAL_KILL_INFO = []\nELITE_MONSTER_KILL_INFO = []\nBUILDING_KILL_INFO = []\n\nfor i in range(len(frames)):\n temp_eventList = frames[i]['events']\n # print(len(temp_eventList))\n for j in range(len(temp_eventList)):\n if temp_eventList[j]['type'] == \"CHAMPION_KILL\":\n CHAMPION_KILL = CHAMPION_KILL + 1\n CHAMPION_KILL_INFO.append(temp_eventList[j]['timestamp'])\n CHAMPION_KILL_INFO.append(temp_eventList[j]['position'])\n if temp_eventList[j]['type'] == \"CHAMPION_SPECIAL_KILL\":\n CHAMPION_SPECIAL_KILL = CHAMPION_SPECIAL_KILL + 1\n CHAMPION_SPECIAL_KILL_INFO.append(temp_eventList[j]['timestamp'])\n CHAMPION_SPECIAL_KILL_INFO.append(temp_eventList[j]['position'])\n if temp_eventList[j]['type'] == \"ELITE_MONSTER_KILL\":\n ELITE_MONSTER_KILL = ELITE_MONSTER_KILL + 1\n ELITE_MONSTER_KILL_INFO.append(temp_eventList[j]['timestamp'])\n ELITE_MONSTER_KILL_INFO.append(temp_eventList[j]['position'])\n if temp_eventList[j]['type'] == \"BUILDING_KILL\":\n BUILDING_KILL = ELITE_MONSTER_KILL + 1\n BUILDING_KILL_INFO.append(temp_eventList[j]['timestamp'])\n\n try:\n if temp_eventList[j]['towerType']:\n BUILDING_KILL_INFO.append(temp_eventList[j]['towerType'])\n if temp_eventList[j]['buildingType']:\n BUILDING_KILL_INFO.append(temp_eventList[j]['towerType'])\n except KeyError:\n continue\n\nprint(\"CHAMPION_KILL: \" + str(CHAMPION_KILL))\nprint(\"CHAMPION_SPECIAL_KILL: \" + str(CHAMPION_SPECIAL_KILL))\nprint(\"ELITE_MONSTER_KILL: \" + str(ELITE_MONSTER_KILL))\nprint(\"BUILDING_KILL: \" + str(BUILDING_KILL))\n\nprint(\"CHAMPION_KILL_INFO: \")\nprint(CHAMPION_KILL_INFO)\n\nprint(\"CHAMPION_SPECIAL_KILL_INFO: \")\nprint(CHAMPION_SPECIAL_KILL_INFO)\n\nprint(\"ELITE_MONSTER_KILL_INFO: \")\nprint(ELITE_MONSTER_KILL_INFO)\n\nprint(\"BUILDING_KILL_INFO: \")\nprint(BUILDING_KILL_INFO)\n","repo_name":"kratewong/MOBA_Data_Analysis","sub_path":"Count.py","file_name":"Count.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"} +{"seq_id":"43692431951","text":"from collective.geo.settings.interfaces import IGeoCustomFeatureStyle\nfrom zope.component import queryAdapter\n\n\ndef initializeCustomFeatureStyles(obj, event):\n \"\"\"Initializes IGeoCustomFeatureStyle for AddressBlocks\n upon object creation.\n\n For AddressBlocks we don't want to display the map viewlet in one of the\n default viewlet managers but only in the custom 'contact_view'.\n \"\"\"\n custom_styles = queryAdapter(obj, IGeoCustomFeatureStyle)\n custom_styles.set('use_custom_styles', True)\n custom_styles.set('map_height', '30em')\n custom_styles.set('map_viewlet_position', 'fake-manager') # Don't display\n","repo_name":"4teamwork/ftw.contentpage","sub_path":"ftw/contentpage/handlers/geo_config.py","file_name":"geo_config.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"} +{"seq_id":"72304909370","text":"from django.test import TestCase\n\nfrom smokeshop.forms import FeedbackForm\n\n\nclass FeedbackFormTestCase(TestCase):\n\n def test_valid_form_with_correct_data(self):\n \"\"\"Тест формы при передачи корректных данных\"\"\"\n form = FeedbackForm(data={\n 'text': 'Some text', 'rating': 4\n })\n self.assertTrue(form.is_valid())\n tag_data = '