diff --git "a/6334.jsonl" "b/6334.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6334.jsonl"
@@ -0,0 +1,1603 @@
+{"seq_id":"11588253041","text":"import os\nfrom flask import Flask, jsonify, request\nfrom faker import Factory\nfrom twilio.access_token import AccessToken, ConversationsGrant\n\napp = Flask(__name__)\nfake = Factory.create()\n\n@app.route('/')\ndef index():\n return app.send_static_file('index.html')\n\n@app.route('/token')\ndef token():\n # get credentials for environment variables\n account_sid = os.environ['TWILIO_ACCOUNT_SID']\n api_key = os.environ['TWILIO_API_KEY']\n api_secret = os.environ['TWILIO_API_SECRET']\n\n # Create an Access Token\n token = AccessToken(account_sid, api_key, api_secret)\n\n # Set the Identity of this token\n token.identity = fake.user_name()\n\n # Grant access to Conversations\n grant = ConversationsGrant()\n grant.configuration_profile_sid = os.environ['TWILIO_CONFIGURATION_SID']\n token.add_grant(grant)\n\n # Return token info as JSON\n return jsonify(identity=token.identity, token=token.to_jwt())\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"mattmakai/video-calls-python-swift","sub_path":"video-quickstart-python-master/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"77"}
+{"seq_id":"4678283261","text":"import discord, asyncio, requests, re, time, os, sys, json;from re import search;from discord.ext import commands;from discord.ext.commands import has_permissions, MissingPermissions\r\nimport subprocess\r\nimport random\r\nimport json\r\nfrom itertools import cycle\r\nfrom exchanges.bitfinex import Bitfinex\r\n\r\nwith open('config.json', 'r+', encoding='utf-8') as f:\r\n config = json.load(f)\r\n\r\nbot = commands.Bot(command_prefix='$')\r\n\r\nbot.remove_command('help')\r\n@bot.event\r\nasync def on_ready():\r\n os.system('title Bot running.')\r\n print('Bot started / Running.')\r\n while True:\r\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=f\"$help | BTC: ${Bitfinex().get_current_price()}\"))\r\n await asyncio.sleep(60)\r\n\r\ndisplayoptions = [\"If you'd like to contribute to the bot's development (not required), feel free to send any necessary amount to 17rpaAv4XXDLeTLP6kzMKxd3d3zqdkCpgD\", \" Invite this discord bot to your server! https://discord.com/oauth2/authorize?client_id=806580500986593282&scope=bot\"]\r\ndef checkConfirmations(txid, proxy=None):\r\n if proxy == None:\r\n getconv = requests.get(f'https://api.blockcypher.com/v1/btc/main/txs/{txid}?limit=50&includeHex=true')\r\n if getconv.status_code == 200:\r\n if getconv.json()['double_spend'] == True:\r\n return \"DoubleSpent\"\r\n else:\r\n return getconv.json()['confirmations']\r\n else:\r\n return checkConfirmations(txid)\r\n\r\ndef blockcypheraccelerate(rawtxid):\r\n data = {\r\n 'tx': rawtxid\r\n }\r\n r = requests.post(' https://api.blockcypher.com/v1/bcy/test/txs/push', data=data)\r\n if r.status_code == 200:\r\n return True\r\n else:\r\n return False\r\ndef smartbitaccelerate(rawtxid):\r\n data = {\r\n 'hex': rawtxid\r\n }\r\n r = requests.post('https://api.smartbit.com.au/v1/blockchain/pushtx', data=data)\r\n if r.status_code == 200:\r\n return True\r\n else:\r\n return False\r\n\r\ndef coinbinaccelerate(rawtxid):\r\n params = {\r\n 'uid': 1,\r\n 'key': 12345678901234567890123456789012,\r\n 'setmodule': 'bitcoin',\r\n 'request': 'sendrawtransaction'\r\n }\r\n data = {\r\n 'rawtx': rawtxid\r\n }\r\n r = requests.get(f'https://coinb.in/api/?uid=1&key=12345678901234567890123456789012&setmodule=bitcoin&request=sendrawtransaction', params=params, data=data)\r\n if r.status_code == 200:\r\n return True\r\n else:\r\n return False\r\n\r\n@bot.command()\r\nasync def check(ctx, txid=None, confcheck=None):\r\n if txid != None:\r\n try:\r\n if confcheck == None:\r\n confcheck = 1\r\n \r\n currrentconf = checkConfirmations(txid)\r\n if currrentconf != 'DoubleSpent':\r\n if int(checkConfirmations(txid)) >= int(confcheck):\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention}, your transaction ``{txid}`` has already hit ``{confcheck}`` confirmations. The transaction is currently on ``{checkConfirmations(txid)}`` confirmation(s).',\r\n color=0xd43b33\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n await ctx.send(embed=embed)\r\n else:\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention}, monitoring your transaction ``{txid}`` on the bitcoin network for ``{confcheck}`` confirmations. 
The transaction is currently on ``{checkConfirmations(txid)}`` confirmations.',\r\n color=0x5CDBF0\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n message = await ctx.send(embed=embed)\r\n\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention}, monitoring your transaction ``{txid}`` on the bitcoin network for ``{confcheck}`` confirmations. The transaction is currently on ``{checkConfirmations(txid)}`` confirmations.\\n**Your transaction was successfully accelerated on smartbit, coinbin, and blockcypher!** ✅',\r\n color=0x38f232\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n\r\n boosttxid = requests.get(f'https://blockstream.info/api/tx/{txid}/hex').text\r\n coinbinaccelerate(boosttxid)\r\n smartbitaccelerate(boosttxid)\r\n blockcypheraccelerate(boosttxid)\r\n await message.edit(embed=embed)\r\n while True:\r\n await asyncio.sleep(30)\r\n currrentconf = checkConfirmations(txid)\r\n if currrentconf != 'DoubleSpent':\r\n if int(currrentconf) >= int(confcheck):\r\n await ctx.send(f'{ctx.author.mention}, your transaction ``{txid}`` has successfully hit ``{confcheck}`` confirmations.')\r\n await ctx.author.send(f'{ctx.author.mention}, your transaction ``{txid}`` has successfully hit ``{confcheck}`` confirmations.')\r\n break\r\n else:\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention} **WARNING** your transaction ``{txid}`` was maliciously labeled as doublespent on the senders\\' side. If you are undergoing a deal, please stay cautious and know that the bitcoin delivered will be rolled back to the sender.',\r\n color=0xd43b33\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n message = await ctx.send(embed=embed)\r\n message = await ctx.author.send(embed=embed)\r\n else:\r\n embed = discord.Embed(\r\n description=f'{ctx.author.mention} **WARNING** your transaction ``{txid}`` was maliciously labeled as doublespent on the senders\\' side. If you are undergoing a deal, please stay cautious and know that the bitcoin delivered will be rolled back to the sender.',\r\n color=0xd43b33\r\n )\r\n\r\n embed.set_footer(text=random.choice(displayoptions))\r\n\r\n message = await ctx.send(embed=embed)\r\n await ctx.author.send(embed=embed)\r\n\r\n except discord.ext.commands.errors.MissingRequiredArgument:\r\n await ctx.send(f'{ctx.author.mention}, a required arguement is missing when using this command. Please retry the command by running ``!check (txid) (confirmations)``')\r\n else: \r\n await ctx.author.send('The required bitcoin network transaction ID is missing when using this command. Please retry the command by running ``!check (txid) (confirmations)``')\r\n\r\n@bot.command()\r\nasync def invite(ctx):\r\n await ctx.send(f'{ctx.author.mention}, invite the ``Crypto Checker`` discord bot to your discord server by using the following link: https://discord.com/oauth2/authorize?client_id=806580500986593282&scope=bot')\r\n\r\n@bot.command()\r\nasync def help(ctx):\r\n pass\r\n\r\nbot.run(config['token'])\r\n","repo_name":"acierp/CryptoChecker","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"77"}
+{"seq_id":"24300819058","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 8 21:38:07 2020\r\n\r\n@author: Navneet Yadav\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\n#%% image print function\r\ndef imagepr(img):\r\n cv2.imshow('image', img) \r\n k = cv2.waitKey(0) & 0xFF\r\n \r\n # # wait for 's' key to save and exit\r\n if k == ord('s'): \r\n cv2.imwrite('copy.png',img) \r\n cv2.destroyAllWindows() \r\n # any key to exit \r\n else :\r\n cv2.destroyAllWindows()\r\n#%% resize function\r\ndef resize(img,w):\r\n h_org, w_org = img.shape[:2]\r\n # Calculating the ratio \r\n ratio = w / w_org\r\n # Creating a tuple containing width and height \r\n dim = (w, int(h_org * ratio)) \r\n # Resizing the image \r\n return cv2.resize(img, dim)\r\n#%%\r\ndef canny(image, sigma=0.33):\r\n\t# compute the median of the single channel pixel intensities\r\n\tv = np.median(image)\r\n\t# apply automatic Canny edge detection using the computed median\r\n\tlower = int(max(0, (1.0 - sigma) * v))\r\n\tupper = int(min(255, (1.0 + sigma) * v))\r\n\tedged = cv2.Canny(image, lower, upper)\r\n\t# return the edged image\r\n\treturn edged\r\n# %% taking pic\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3,1280)\r\ncap.set(4,720)\r\nwhile(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret==True:\r\n # Display the resulting frame\r\n cv2.imshow('frame', frame)\r\n if cv2.waitKey(1) & 0xFF == ord('s'):\r\n cv2.imwrite(\"NewPicture.jpg\",frame)\r\n break\r\n else:\r\n break\r\n# Release everything if job is finished\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n#%%image preprocessing\r\nimg = cv2.imread(\"rgpv_smart_card.jpg\")\r\nimg=resize(img, 400)\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nblurred = cv2.GaussianBlur(gray, (3, 3), 0)\r\nedges = cv2.Canny(gray, 75, 200)\r\n\r\n#morphological dilation\r\nkernel = np.ones((5,5),np.uint8)\r\ndilation = cv2.dilate(edges,kernel,iterations = 1)\r\nimagepr(edges)\r\n#%% finding contours \r\ncontours, hierarchy = cv2.findContours(edges.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\ncnts = sorted(contours, key = cv2.contourArea, reverse = True)[:5]\r\n# loop over the contours\r\nfor c in cnts:\r\n\t# approximate the contour\r\n\tperi = cv2.arcLength(c, True)\r\n\tapprox = cv2.approxPolyDP(c, 0.02 * peri, True)\r\n\t# if our approximated contour has four points, then we\r\n\t# can assume that we have found our screen\r\n\tif len(approx) == 4:\r\n\t\tpCnt = approx\r\n\t\tbreak\r\n#%%\r\n#cv2.drawContours(img, [pCnt], -1, (0, 255, 0), 2)\r\n#imagepr(img)\r\n#%%\r\nx, y, width, height = cv2.boundingRect(pCnt)\r\nroi = img[y:y+height, x:x+width]\r\ncv2.imwrite(\"result_doc.png\", roi)","repo_name":"navneet05/doc_scanner_opencv","sub_path":"doc_scanner.py","file_name":"doc_scanner.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"319533740","text":"\n\"\"\"Mock handlers for the collector datastore backends.\"\"\"\n\nfrom typing import List, Dict, Set, Optional, Iterable\n\nfrom .. import ConfigEntity\nfrom ..abc_backend import (\n AbcDataStoreBackend,\n Entity,\n NamespaceTemplateEntity,\n as_namespace_template_entity,\n ServiceIdConfigEntity,\n as_service_id_config_entity,\n GatewayConfigEntity,\n as_gateway_config_entity,\n ServiceColorTemplateEntity,\n as_service_color_template_entity,\n TemplateEntity,\n)\nfrom .....protect import RouteProtection\n\n\nclass MockBackend(AbcDataStoreBackend):\n \"\"\"Back-end data store mock object.\"\"\"\n active_versions: Set[str]\n namespace_entities: Dict[NamespaceTemplateEntity, str]\n service_color_entities: Dict[ServiceColorTemplateEntity, str]\n gateway_entities: Dict[GatewayConfigEntity, str]\n service_id_entities: Dict[ServiceIdConfigEntity, str]\n committed: Optional[str]\n\n def __init__(self):\n self.active_versions = set()\n self.namespace_entities = {}\n self.service_color_entities = {}\n self.gateway_entities = {}\n self.service_id_entities = {}\n self.committed = None\n\n def get_active_version(self, activity: str) -> str:\n assert activity not in self.active_versions\n self.active_versions.add(activity)\n return activity\n\n def start_changes(self, activity: str) -> str:\n assert activity not in self.active_versions\n self.active_versions.add(activity)\n return activity\n\n def commit_changes(self, version: str) -> None:\n assert version in self.active_versions\n self.committed = version\n\n def download(self, version: str, entity: Entity) -> str:\n assert version in self.active_versions\n sct = as_service_color_template_entity(entity)\n if sct:\n return self.service_color_entities[sct]\n nte = as_namespace_template_entity(entity)\n if nte:\n return self.namespace_entities[nte]\n sic = as_service_id_config_entity(entity)\n if sic:\n return self.service_id_entities[sic]\n gwc = as_gateway_config_entity(entity)\n assert gwc\n return self.gateway_entities[gwc]\n\n def upload(self, version: str, entity: Entity, contents: str) -> None:\n assert version in self.active_versions\n sct = as_service_color_template_entity(entity)\n if sct:\n self.service_color_entities[sct] = contents\n return\n nte = as_namespace_template_entity(entity)\n if nte:\n self.namespace_entities[nte] = contents\n return\n sic = as_service_id_config_entity(entity)\n if sic:\n self.service_id_entities[sic] = contents\n return\n gwc = as_gateway_config_entity(entity)\n assert gwc\n self.gateway_entities[gwc] = contents\n\n def rollback_changes(self, version: str) -> None:\n self.active_versions.remove(version)\n self.gateway_entities = {}\n self.service_id_entities = {}\n self.service_color_entities = {}\n self.namespace_entities = {}\n\n def get_template_entities(self, version: str) -> Iterable[TemplateEntity]:\n ret: List[TemplateEntity] = list(self.service_color_entities.keys())\n ret.extend(self.namespace_entities.keys())\n return ret\n\n def get_config_entities(self, version: str) -> Iterable[ConfigEntity]:\n ret: List[ConfigEntity] = list(self.service_id_entities.keys())\n ret.extend(self.gateway_entities.keys())\n return ret\n\n def get_namespace_template_entities(\n self, version: str, namespace: Optional[str] = None,\n protection: Optional[RouteProtection] = None, purpose: Optional[str] = None,\n ) -> Iterable[NamespaceTemplateEntity]:\n for entity in self.namespace_entities:\n if (\n (namespace is None or namespace == entity.namespace)\n and (protection is None or protection 
== entity.protection)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n\n def get_gateway_config_entities(\n self, version: str, namespace: Optional[str] = None,\n protection: Optional[RouteProtection] = None, purpose: Optional[str] = None,\n ) -> Iterable[GatewayConfigEntity]:\n for entity in self.gateway_entities:\n if (\n (namespace is None or namespace == entity.namespace_id)\n and (protection is None or protection == entity.protection)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n\n def get_service_color_template_entities(\n self, version: str, namespace: Optional[str] = None,\n service: Optional[str] = None, color: Optional[str] = None,\n purpose: Optional[str] = None,\n ) -> Iterable[ServiceColorTemplateEntity]:\n for entity in self.service_color_entities:\n if (\n (namespace is None or namespace == entity.namespace)\n and (service is None or service == entity.service)\n and (color is None or color == entity.color)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n\n def get_service_id_config_entities(\n self, version: str, namespace_id: Optional[str] = None,\n service_id: Optional[str] = None, service: Optional[str] = None,\n color: Optional[str] = None, purpose: Optional[str] = None,\n ) -> Iterable[ServiceIdConfigEntity]:\n for entity in self.service_id_entities:\n if (\n (namespace_id is None or namespace_id == entity.namespace_id)\n and (service_id is None or service_id == entity.service_id)\n and (service is None or service == entity.service)\n and (color is None or color == entity.color)\n and (purpose is None or purpose == entity.purpose)\n ):\n yield entity\n","repo_name":"groboclown/nightjar-mesh","sub_path":"old-stuff-for-reference/nightjar-base/nightjar-src/python-src/nightjar/backend/api/data_store/tests/mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"14017925530","text":"# IMPORT MODULES\nimport hashlib\nimport json\nfrom time import time\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\nimport requests\nfrom flask import Flask, jsonify, request\n\n# CREATE BLOCKCHAIN CLASS\nclass Blockchain:\n def __init__(self):\n self.current_transactions = []\n self.chain = []\n self.nodes = set() # for idempotency\n\n # CREATE GENESIS BLOCK\n self.new_block(previous_hash=1, proof=100)\n\n # NEW BLOCK METHOD (create new block and add to the chain)\n def new_block(self, proof, previous_hash):\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1])\n }\n\n # reset current transactions list\n self.current_transactions = []\n self.chain.append(block)\n\n return block\n\n # NEW TRANSACTION METHOD (create new transaction for next mined block)\n def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n\n return self.last_block['index'] + 1\n\n # PROPERTY DECORATOR TO RETURN THE LAST BLOCK\n @property\n def last_block(self):\n return self.chain[-1]\n\n # HASH METHOD (create SHA-256 hash of a block)\n # TURN HASH METHOD INTO STATIC METHOD (cannot modify class or object)\n @staticmethod\n def hash(block):\n # must have an ORDERED dictionary\n block_string = json.dumps(block, sort_keys=True).encode()\n\n return hashlib.sha256(block_string).hexdigest()\n\n # PROOF OF WORK (POW) METHOD (where check condition is hash(pp') has 4 leading 0s)\n def proof_of_work(self, last_proof):\n proof = 0\n while self.valid_proof(last_proof, proof) is False:\n proof += 1\n\n return proof\n\n # TURN VALID PROOF METHOD INTO STATIC METHOD (validates if hash has 4 leading 0s)\n @staticmethod\n def valid_proof(last_proof, proof):\n guess = f'{last_proof}{proof}'.encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n\n return guess_hash[:4] == '0000'\n\n # REGISTERING NODE METHOD (add new ndoe to list of nodes)\n def register_node(self, address):\n parsed_url = urlparse(address)\n\n if parsed_url.netloc:\n self.nodes.add(parsed_url.netloc)\n elif parsed_url.path:\n # accepts url without a 'xxx.xxx.x.x:xxxx' scheme\n self.nodes.add(parsed_url.path)\n else:\n raise ValueError('INVALID URL')\n\n '''\n CODE BELOW IS THE CONSENSUS ALGORITHM - ADDRESSES CONFLICTS IN THE BLOCKCHAIN OVER DIFFERENT NODES BY FINDING THE LONGEST CHAIN AND REPLACING WITH THAT CHAIN\n '''\n\n # VALID CHAIN CHECK METHOD\n def valid_chain(self, chain):\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n print(f'{last_block}')\n print(f'{block}')\n print(\"\\n-----------\\n\")\n\n # check if previous hash is correct\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # check POW\n if not self.valid_proof(last_block['proof'], block['proof']):\n return False\n\n last_block = block\n current_index += 1\n\n return True\n\n # CONFLICT RESOLUTION METHOD\n def resolve_conflicts(self):\n neighbors = self.nodes\n new_chain = None\n\n max_length = len(self.chain)\n # grab chains from all other nodes and check their length\n for node in neighbors:\n response = requests.get(f'http://{node}/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # compare length with current node's chain and check validity of chain\n if 
length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n # replace with new longest chain\n if new_chain:\n self.chain = new_chain\n return True\n\n return False\n\n'''\nCODE BELOW IS FOR SETTING UP API TO MAKE HTTP REQUESTS TO THE BLOCKCHAIN\n'''\n\n# INSTANTIATE NODE\napp = Flask(__name__)\n\n# GENERATE GLOBALLY UNIQUE ADDRESS FOR NODE\nnode_id = str(uuid4()).replace('-','')\n\n# INSTANTIATE BLOCKCHAIN\nblockchain = Blockchain()\n\n'''\nBELOW ARE ALL THE ROUTES NEEDED TO CREATE AND MANAGE THE BLOCKCHAIN\n'''\n# MINE A NEW BLOCK\n@app.route('/mine', methods=['GET'])\ndef mine():\n # run POW algo\n last_block = blockchain.last_block\n last_proof = last_block['proof']\n proof = blockchain.proof_of_work(last_proof)\n\n # reward the sender for the mine; sender is '0' to signify that this node has mined a new coin\n blockchain.new_transaction(\n sender=\"0\",\n recipient=node_id,\n amount=1\n )\n\n # add new block to the chain\n previous_hash = blockchain.hash(last_block)\n block = blockchain.new_block(proof, previous_hash)\n\n response = {\n 'message': 'New Block Forged',\n 'index': block['index'],\n 'transactions': block['transactions'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash']\n }\n\n return jsonify(response), 200\n\n# MAKE A NEW TRANSACTION\n@app.route('/transaction/new', methods=['POST'])\ndef new_transaction():\n values = request.get_json()\n print(values)\n\n # check required fields\n required = ['sender', 'recipient', 'amount']\n if not all(k in values for k in required):\n return 'MISSING VALUES', 400\n\n # create new transaction\n index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])\n response = {'message': f'Transaction will be added to Block {index}'}\n\n return jsonify(response), 201\n\n# GET THE FULL CHAIN AND LENGTH\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)\n }\n\n return jsonify(response), 200\n\n# REGISTER A NEW NODE\n@app.route('/nodes/register', methods=['POST'])\ndef register_nodes():\n values = request.get_json()\n\n nodes = values.get('nodes')\n if nodes is None:\n return \"ERROR: PLEASE SUPPLY A VALID LIST OF NODES\", 400\n\n for node in nodes:\n blockchain.register_node(node)\n\n response = {\n 'message': 'New nodes have been added',\n 'total_nodes': list(blockchain.nodes)\n }\n\n return jsonify(response), 201\n\n# RESOLVE NODES\n@app.route('/nodes/resolve', methods=['GET'])\ndef consensus():\n replaced = blockchain.resolve_conflicts()\n\n if replaced:\n response= {\n 'message': 'Our chain was replaced/updated',\n 'new_chain': blockchain.chain\n }\n else:\n response = {\n 'message': 'Our chain is authoritative',\n 'chain': blockchain.chain\n }\n\n return jsonify(response), 200\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=5000)\n","repo_name":"sophia2798/blockchain","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73339890808","text":"\"\"\"\n\n## Stream Producer\nLambda function that will query all metrics for a given namespace\nand send metric data for the last 60 seconds to a kinesis stream\n\n\"\"\"\nimport os\nimport json\nfrom typing import List\nfrom datetime import timedelta, datetime, timezone\nimport dateutil.parser\nfrom enum import Enum\nimport itertools\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom dataquality.stream import MetricStream\nfrom definitions.definition import Definition\n\nCW_CLIENT = boto3.client('cloudwatch')\nKINESIS_CLIENT = boto3.client('kinesis')\n\nclass StreamName(Enum):\n KINESIS_MINUTE_STREAM_NAME: str = 'minute'\n KINESIS_HOUR_STREAM_NAME: str = 'hour'\n KINESIS_DAY_STREAM_NAME: str = 'day'\n\ndef main(\n event: dict,\n context: dict\n) -> None:\n \"\"\"Lambda Handler.\"\"\"\n\n account_number = context.invoked_function_arn.split(\":\")[4]\n definition = Definition(account=account_number)\n dataset_stream = MetricStream(metric_sets=definition.metric_sets)\n\n end_time_exact = datetime.utcnow()\n end_time = end_time_exact - timedelta(minutes=end_time_exact.minute % 10,\n seconds=end_time_exact.second,\n microseconds=end_time_exact.microsecond)\n\n md_queries = dataset_stream.metric_data_queries(\n frequency=event['frequency']\n )\n\n if len(md_queries) <= 0:\n print(f\"No metrics matched for {event['frequency']} frequency.\")\n return False\n else:\n print(\"Matched metrics:\")\n print(md_queries)\n\n # Group metrics by Period\n grouped_dict = {}\n for i in md_queries:\n k = i[\"MetricStat\"][\"Period\"]\n if (grouped_dict.get(k) == None):\n grouped_dict[k]=[i]\n else:\n grouped_dict[k].append(i)\n\n # For each Period in grouped_dict get metric data and append to metrics_data list\n metrics_data = []\n\n for period, md in grouped_dict.items():\n start_time = end_time - timedelta(seconds=period)\n period_metrics_data = get_metric_data(\n metric_data_queries=md,\n start_time=start_time,\n end_time=end_time\n )\n for metric_object in period_metrics_data:\n metrics_data.append(metric_object)\n print(metric_object)\n\n put_metrics(\n metrics_data=metrics_data,\n time=end_time,\n event=event,\n context=context,\n metric_sets=dataset_stream.metrics\n )\n\ndef get_metric_data(metric_data_queries, start_time, end_time):\n \"\"\"Paginate and return all metric data under namspace.\"\"\"\n metric_data_results = []\n paginator = CW_CLIENT.get_paginator('get_metric_data')\n page_iterator = paginator.paginate(\n MetricDataQueries=metric_data_queries,\n StartTime=start_time,\n EndTime=end_time\n )\n for page in page_iterator:\n metric_data_results += page['MetricDataResults']\n return metric_data_results\n\ndef translate_metrics_to_records(metrics_data: List[dict], time: datetime, event: dict, context: dict, metric_sets):\n \"\"\"Translate CW metrics list to Kinesis stream records.\"\"\"\n records = []\n metadata_map = {}\n dimensions_map = {}\n\n for metric_object in metrics_data:\n for metric in metric_sets:\n if metric.unique_id() == metric_object['Id']:\n metric_object['Namespace'] = metric.namespace\n metric_object['Name'] = metric.name\n metric_object['Period'] = metric.period\n metric_object['Statistic'] = metric.statistic\n if metric.metadata:\n for meta in metric.metadata:\n metadata_map[meta.name] = meta.value\n metric_object['Metadata'] = metadata_map\n if metric.dimensions:\n for dimension in metric.dimensions:\n dimensions_map[dimension.name] = dimension.value\n metric_object['Dimensions'] = dimensions_map\n else:\n continue\n\n 
metric_object['CollectionTime'] = time.replace(tzinfo=timezone.utc).isoformat()\n metric_object['AccountId'] = context.invoked_function_arn.split(\":\")[4]\n metric_object['Region'] = context.invoked_function_arn.split(\":\")[3]\n metric_object['MetricTimestamp'] = metric_object['Timestamps'][0] if len(metric_object['Timestamps']) > 0 else None\n metric_object['MetricValue'] = metric_object['Values'][0] if len(metric_object['Values']) > 0 else None\n metric_object['Frequency'] = event['frequency']\n records.append({\n 'Data': json.dumps(metric_object, default=str),\n 'PartitionKey': 'default'\n })\n print(records)\n return records\n\ndef put_metrics(metrics_data: List[dict], time: datetime, event: dict, context: dict, metric_sets):\n \"\"\"Put records to kinesis stream\"\"\"\n try:\n KINESIS_CLIENT.put_records(\n Records=translate_metrics_to_records(\n metrics_data=metrics_data,\n time=time,\n event=event,\n context=context,\n metric_sets=metric_sets\n ),\n StreamName=os.environ[StreamName(event['frequency']).name]\n )\n except ClientError as ex:\n raise ex\n\ndef frequency_to_period(frequency: str) -> int:\n \"\"\" Convert rate string to period in seconds.\"\"\"\n if frequency == \"day\":\n period = 86400\n if frequency == \"minute\":\n period = 60\n if frequency == \"hour\":\n period = 3600\n return period\n","repo_name":"awslabs/aws-dataset-ingestion-metrics-collection-framework","sub_path":"lambda/metric_stream_producer.py","file_name":"metric_stream_producer.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"77"}
+{"seq_id":"34038245148","text":"###########################################\r\n######### Enrico Ubaldino ##############\r\n###########################################\r\n\r\nimport cv2\r\n\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n \r\n#Creo variabile per avvio cattura frame. 0 prima webcam, 1 la seconda\r\nlive_Camera = cv2.VideoCapture(0)\r\n\r\n \r\n\r\n #lower_bound = np.array([11,33,111])\r\nlower_bound = np.array([18,50,50])\r\nlower_bound = np.array(lower_bound, dtype=\"uint8\")\r\n\r\n#upper_bound = np.array([90,255,255])\r\nupper_bound = np.array([35,255,255])\r\nupper_bound = np.array(upper_bound, dtype=\"uint8\")\r\n\r\n \r\n\r\nwhile(live_Camera.isOpened()):\r\n\r\n ret, frame = live_Camera.read()\r\n\r\n frame = cv2.resize(frame,(1280,720))\r\n\r\n frame = cv2.flip(frame,1)\r\n\r\n \r\n\r\n frame_smooth = cv2.GaussianBlur(frame,(7,7),0)\r\n cv2.imshow(\"Image Gauss\",frame_smooth)\r\n \r\n frame_hsv = cv2.cvtColor(frame_smooth,cv2.COLOR_BGR2HSV)\r\n cv2.imshow(\"Image HSV\",frame_hsv)\r\n \r\n mask = cv2.inRange(frame_hsv, lower_bound, upper_bound)\r\n \r\n\r\n output = cv2.bitwise_and(frame,frame_hsv, mask)\r\n cv2.imshow(\"Image processing\",output)\r\n image_binary = output\r\n\r\n \r\n\r\n check_if_fire_detected = cv2.countNonZero(mask)\r\n\r\n \r\n\r\n if int(check_if_fire_detected) >= 20000 :\r\n\r\n cv2.putText(frame,\"Fire Detected !\",(300,60),cv2.FONT_HERSHEY_COMPLEX,3,(0,0,255),2)\r\n print('Fire detected')\r\n \r\n\r\n \r\n\r\n cv2.imshow(\"Fire Detection\",frame)\r\n \r\n\r\n \r\n\r\n if cv2.waitKey(10) == 27 :\r\n\r\n break\r\n\r\n \r\n\r\nlive_Camera.release()\r\n\r\ncv2.destroyAllWindows()\r\n","repo_name":"Cyberg96/UnioneElettronica","sub_path":"FireDetection_UE.py","file_name":"FireDetection_UE.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"36825391924","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (c) 2018 Leland Stanford Junior University\r\n# Copyright (c) 2018 The Regents of the University of California\r\n#\r\n# This file is part of the SimCenter Backend Applications\r\n#\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n#\r\n# 1. Redistributions of source code must retain the above copyright notice,\r\n# this list of conditions and the following disclaimer.\r\n#\r\n# 2. Redistributions in binary form must reproduce the above copyright notice,\r\n# this list of conditions and the following disclaimer in the documentation\r\n# and/or other materials provided with the distribution.\r\n#\r\n# 3. Neither the name of the copyright holder nor the names of its contributors\r\n# may be used to endorse or promote products derived from this software without\r\n# specific prior written permission.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\r\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\r\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\r\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\r\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\r\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\r\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\r\n# POSSIBILITY OF SUCH DAMAGE.\r\n#\r\n# You should have received a copy of the BSD 3-Clause License along with\r\n# this file. 
If not, see .\r\n#\r\n# Contributors:\r\n# Kuanshi Zhong\r\n#\r\n\r\nimport os\r\nimport subprocess\r\nimport json\r\nimport random\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom FetchOpenSHA import *\r\n\r\n\r\ndef create_earthquake_scenarios(scenario_info, stations, input_dir=[], output_dir=[], output_format='SimCenterEvent'):\r\n\r\n # Number of scenarios\r\n source_num = scenario_info.get('Number', 1)\r\n if source_num == 'All':\r\n # Large number to consider all sources in the ERF\r\n source_num = 10000000\r\n # Directly defining earthquake ruptures\r\n if scenario_info['Generator'] == 'Simulation':\r\n # TODO:\r\n print('Physics-based earthquake simulation is under development.')\r\n return 1\r\n # Searching earthquake ruptures that fulfill the request\r\n elif scenario_info['Generator'] == 'Selection':\r\n # Collecting all possible earthquake scenarios\r\n lat = []\r\n lon = []\r\n for s in stations['Stations']:\r\n lat.append(s['Latitude'])\r\n lon.append(s['Longitude'])\r\n # Reference location\r\n lat = np.mean(lat)\r\n lon = np.mean(lon)\r\n ref_station = [lat, lon]\r\n # Getting earthquake rupture forecast data\r\n source_type = scenario_info['EqRupture']['Type']\r\n if source_type == 'ERF':\r\n source_model = scenario_info['EqRupture']['Model']\r\n source_name = scenario_info['EqRupture'].get('Name', None)\r\n min_M = scenario_info['EqRupture'].get('min_Mag', 5.0)\r\n max_M = scenario_info['EqRupture'].get('max_Mag', 9.0)\r\n max_R = scenario_info['EqRupture'].get('max_Dist', 1000.0)\r\n eq_source = getERF(source_model, True)\r\n erf_data = export_to_json(eq_source, ref_station, outfile = None, \\\r\n EqName = source_name, minMag = min_M, \\\r\n maxMag = max_M, maxDistance = max_R, \\\r\n maxSources = np.max([500, source_num]))\r\n # Parsing data\r\n feat = erf_data['features']\r\n tag = []\r\n for i, cur_f in enumerate(feat):\r\n if source_name and (source_name not in cur_f['properties']['Name']):\r\n continue\r\n if min_M > cur_f['properties']['Magnitude']:\r\n continue\r\n tag.append(i)\r\n # Abstracting desired ruptures\r\n s_tag = random.sample(tag, min(source_num, len(tag)))\r\n erf_data['features'] = list(feat[i] for i in s_tag)\r\n scenario_data = dict()\r\n for i, rup in enumerate(erf_data['features']):\r\n scenario_data.update({i: {\r\n 'Type': source_type,\r\n 'RuptureForecast': source_model,\r\n 'SourceIndex': rup['properties']['Source'],\r\n 'RuptureIndex': rup['properties']['Rupture']\r\n }})\r\n # Cleaning tmp outputs\r\n del erf_data\r\n elif source_type == 'PointSource':\r\n scenario_data = dict()\r\n try:\r\n magnitude = scenario_info['EqRupture']['Magnitude']\r\n location = scenario_info['EqRupture']['Location']\r\n average_rake = scenario_info['EqRupture']['AverageRake']\r\n average_dip = scenario_info['EqRupture']['AverageDip']\r\n scenario_data.update({0: {\r\n 'Type': source_type,\r\n 'Magnitude': magnitude,\r\n 'Location': location,\r\n 'AverageRake': average_rake,\r\n 'AverageDip': average_dip\r\n }})\r\n except:\r\n print('Please check point-source inputs.')\r\n # Simulating the earthquake\r\n elif scenario_info['Generator'] == 'ShakerMaker':\r\n from imm.ShakerMakerSimulation import ShakerMakerModel\r\n # initialization\r\n sm_model = ShakerMakerModel(scenario_info['SimuConfig'], stations['Stations'], input_dir)\r\n # configuration\r\n sm_model.model_configuration()\r\n scenario_data = sm_model\r\n sm_model.run_simulation(output_dir, output_format)\r\n\r\n # return\r\n return scenario_data\r\n\r\n\r\ndef create_wind_scenarios(scenario_info, 
event_info, stations, data_dir):\r\n\r\n # Number of scenarios\r\n source_num = scenario_info.get('Number', 1)\r\n # Directly defining earthquake ruptures\r\n if scenario_info['Generator'] == 'Simulation':\r\n # Collecting site locations\r\n lat = []\r\n lon = []\r\n for s in stations['Stations']:\r\n lat.append(s['Latitude'])\r\n lon.append(s['Longitude'])\r\n # Station list\r\n station_list = {\r\n 'Latitude': lat,\r\n 'Longitude': lon\r\n }\r\n # Track data\r\n try:\r\n track_file = scenario_info['Storm'].get('Track')\r\n df = pd.read_csv(os.path.join(data_dir, track_file), header = None, index_col = None)\r\n track = {\r\n 'Latitude': df.iloc[:, 0].values.tolist(),\r\n 'Longitude': df.iloc[:, 1].values.tolist()\r\n }\r\n except:\r\n print('CreateScenario: no storm track provided or file format not accepted.')\r\n # Save Lat_w.csv\r\n track_simu_file = scenario_info['Storm'].get('TrackSimu', None)\r\n if track_simu_file: \r\n df = pd.read_csv(os.path.join(data_dir, track_simu_file), header = None, index_col = None)\r\n track_simu = df.iloc[:, 0].values.tolist()\r\n else:\r\n track_simu = track['Latitude']\r\n # Reading Terrain info (if provided)\r\n terrain_file = scenario_info.get('Terrain', None)\r\n if terrain_file:\r\n with open(os.path.join(data_dir, terrain_file)) as f:\r\n terrain_data = json.load(f)\r\n else:\r\n terrain_data = []\r\n # Parsing storm properties\r\n param = []\r\n param.append(scenario_info['Storm']['Landfall']['Latitude'])\r\n param.append(scenario_info['Storm']['Landfall']['Longitude'])\r\n param.append(scenario_info['Storm']['LandingAngle'])\r\n param.append(scenario_info['Storm']['Pressure'])\r\n param.append(scenario_info['Storm']['Speed'])\r\n param.append(scenario_info['Storm']['Radius'])\r\n # Monte-Carlo\r\n #del_par = [0, 0, 0] # default\r\n # Parsing mesh configurations\r\n mesh_info = [1000., scenario_info['Mesh']['DivRad'], 1000000.]\r\n mesh_info.extend([0., scenario_info['Mesh']['DivDeg'], 360.])\r\n # Wind speed measuring height\r\n measure_height = event_info['IntensityMeasure']['MeasureHeight']\r\n # Saving results\r\n scenario_data = dict()\r\n for i in range(source_num):\r\n scenario_data.update({i: {\r\n 'Type': 'Wind',\r\n 'CycloneParam': param,\r\n 'StormTrack': track,\r\n 'StormMesh': mesh_info,\r\n 'Terrain': terrain_data,\r\n 'TrackSimu': track_simu,\r\n 'StationList': station_list,\r\n 'MeasureHeight': measure_height\r\n }})\r\n # return\r\n return scenario_data\r\n else:\r\n print('Currently only supporting Simulation generator.')\r\n","repo_name":"kuanshi/HazardSimulation","sub_path":"CreateScenario.py","file_name":"CreateScenario.py","file_ext":"py","file_size_in_byte":9159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"41487245267","text":"def breadth_first_search(graph, node):\n if node not in graph: return\n\n queue = [node]\n visited = [node]\n\n while len(queue) > 0:\n node = queue.pop(0)\n print(node, end = \" \")\n\n for neighbor in graph[node]:\n if neighbor not in visited:\n visited.append(neighbor)\n queue.append(neighbor)\n\n\n#key is node, list is neighbors\nsample_graph = {\n 0: [1, 2, 3, 5],\n 1: [2, 3, 5],\n 2: [4, 5],\n 3: [4],\n 4: [5],\n 5: []\n}\n\nbreadth_first_search(sample_graph, 0)","repo_name":"FOSS-UCSC/FOSSALGO","sub_path":"algorithms/gr-bfsrh/python3/breadth_first_search.py","file_name":"breadth_first_search.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"77"}
+{"seq_id":"40967219846","text":"import pandas as pd\nfrom techminer.core import explode\n\n\ndef keywords_coverage(data, column, keywords_list):\n\n data = data[[column, \"ID\"]].dropna()\n num_documents = len(data)\n x = pd.DataFrame({column: keywords_list})\n x[\"Cum Coverage (Cum Num Documents)\"] = 0\n\n data[column] = data[column].map(lambda w: w.split(\";\"))\n data[\"SELECTED\"] = False\n keywords_list = [\" \".join(keyword.split(\" \")[:-1]) for keyword in keywords_list]\n x.index = keywords_list\n for keyword in keywords_list:\n data[\"SELECTED\"] = data.SELECTED | data[column].map(lambda w: keyword in w)\n selected = data[data.SELECTED][[\"ID\"]].drop_duplicates()\n x.loc[keyword, \"Cum Coverage (Cum Num Documents)\"] = len(selected)\n\n x[\"Cum Coverage (%)\"] = x[\"Cum Coverage (Cum Num Documents)\"].map(\n lambda w: str(round(100 * w / num_documents, 1)) + \" %\"\n )\n x = x.reset_index()\n x.pop(\"index\")\n return x\n","repo_name":"jdvelasq/techminer","sub_path":"src/core/keywords_coverage.py","file_name":"keywords_coverage.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"10678621473","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nimport sampler as randc\n\n############################################################################\n# RANDOM COUNTERS\n############################################################################\nbins = 17\nrc = randc.Sampler(np.ones(bins))\nrc.update(.3, 6)\nrc.update(.3, 7)\nrc.update(.5, 1)\nrc.update(.1, bins - 1)\nsampling = []\nfor i in range(10000):\n sampling.append(rc.sample())\nprint(rc.score_tree)\nplt.hist(sampling, bins=bins)\nplt.show()\n","repo_name":"remilepriol/sdca4crf","sub_path":"test/random_counters_test.py","file_name":"random_counters_test.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"34510334883","text":"\"\"\"\nWrite a Python program that accept a positive number and subtract from this number the sum of its digits and so on. Continues this operation until the number is positive.\n\"\"\"\n\nnum = int(input(\"Enter a number\"))\ndigits = 0\nnum1 = num\nwhile (num != 0):\n digi = num % 10\n digits += digi\n num = num // 10\n# print(digits)\nwhile num1 > 0:\n num1 = num1 - digits\n print(num1)\n","repo_name":"shraddhaa43/Python-Interview-Solutions-Repository","sub_path":"Challenge Zone Medium to Hard Level/Q23.py","file_name":"Q23.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7212013091","text":"import sys\nimport os\n\npath_dir = os.getcwd()\nsys.path.insert(0, \"..\")\n#print(sys.path)\n\n\n\"\"\"Selenium Packages\"\"\"\nimport argparse\n\nfrom selenium.common.exceptions import ElementClickInterceptedException, NoSuchElementException, \\\n ElementNotInteractableException\nfrom selenium.webdriver import Keys\nfrom selenium.webdriver.support import expected_conditions\n\"\"\" \nPage Object classes \nCreated By: Prankur Garg\nDate: 25th May 2022\n\"\"\"\nfrom Cumulative_Reporting.BaseClass import BaseClass\nfrom PageObjectAdiosPlus.AdiosPlusDespatch import AdiosPlusDespatch\nfrom PageObjectAdiosPlus.AdiosPlusHome import AdiosPlusHome\nfrom PageObjectAdiosPlus.AdiosPlusResourcesPage import AdiosPlusResourcesPage\n\n\n\"\"\"Python Package\"\"\"\nimport time\nimport datetime\nimport logging\n\n\nclass AdiosPlusMain(BaseClass):\n def __init__(self):\n self._current_time = datetime.datetime.now()\n self._current_time = self._current_time.strftime(\"%Y%m%d\")\n self._log_name = \"Adios_run_\" + self._current_time + \".log\"\n self._logger = logging.getLogger(__name__)\n self._logger.setLevel(logging.INFO)\n self._handler = logging.FileHandler(self._log_name)\n self._logger.addHandler(self._handler)\n self._formatter = logging.Formatter('%(asctime)s-%(message)s')\n self._handler.setFormatter(self._formatter)\n\n def parse_commandline_args(self):\n parser = argparse.ArgumentParser(description=\"Need to enter test cycle, qual name and team name\")\n parser.add_argument('--test_cycle', type=str, help=\"Cumulative epack test cycle\", required=True)\n parser.add_argument('--qual', type=str, help=\"e.g. Cumulative_Epack_\", required=True)\n parser.add_argument('--team', type = str, help = \"Enter Team Name core/data-services\", required= True)\n if len(sys.argv[1:]) < 6 or '--test_cycle' not in sys.argv[1:] or '--qual' not in sys.argv[1:] or '--team' not in sys.argv[1:]:\n self.driver.close()\n self._logger.error(\"Either parameters were not provided or wrongly given\")\n parse_args = parser.parse_args()\n sys.exit(\"Required Parameters were not provided\")\n teams = [\"core\", \"data-services\"]\n parse_args = parser.parse_args()\n self._test_cycle = parse_args.test_cycle\n self._qual = parse_args.qual\n self._team = parse_args.team\n if self._team not in teams:\n self._logger.error(\"Team name entered is wrong. It should be either core or data-sustaining\")\n sys.exit(\"Team name entered is wrong. 
It should be either core or data-sustaining\")\n self._logger.info(f\"User has entered Test Cycle name as {self._test_cycle} and qual Name as {self._qual}\")\n\n\n def adios_plus_login(self):\n self.driver.get(\"http://adiosplus.cec.lab.emc.com/dashboard\")\n self._home_adios_plus = AdiosPlusHome(self.driver)\n user_name = self._home_adios_plus.get_user_name()\n user_name.send_keys(\"gargp6\")\n user_password = self._home_adios_plus.get_user_password()\n user_password.send_keys(\"\")\n submit_button = self._home_adios_plus.submit_button()\n submit_button.click()\n self._logger.info(\"User has entered username and password and logging into Adios Page\")\n\n def navigate_resources_page(self):\n element = self._home_adios_plus.navigate_resources()\n self.driver.execute_script(\"arguments[0].click();\", element)\n self._logger.info(\"We are resource tab of Adios Page\")\n resource_page = AdiosPlusResourcesPage(self.driver)\n time.sleep(30)\n print(AdiosPlusResourcesPage.search_plus)\n\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusResourcesPage.search_plus)))\n\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusResourcesPage.search_plus)))\n\n data_services_suites = {\n \"Cumulative_Testing_P2\": \"10.60.153.253\",\n \"Cumulative_Testing_P3\": \"10.60.153.195\",\n \"Defrag_Shrink_SP_Cumulative\": \"10.60.154.6\",\n \"LREP_SP_Cumulative\" : \"10.60.154.51\",\n \"RDF_SP_Cumulative\" : \"10.60.155.123\",\n \"Cumulative_Testing_P1\" : \"10.60.153.202\",\n \"Long_Running_Tests_SP_Cumulative\": \"10.60.153.132\",\n \"Cumulative_Testing_P4\": \"10.60.153.195\"\n }\n\n core_suites = {\n \"ACS DataMobility\" : \"10.60.153.55\",\n \"ACT Platform\" : \"10.60.153.55\",\n \"ACT Config\" : \"10.60.153.55\",\n \"ACT Backend\" : \"10.60.153.55\",\n \"ACS_Enginuity Services\" : \"10.60.153.55\"\n }\n self._suites = {}\n if self._team == \"core\":\n self._suites = core_suites\n elif self._team == \"data-services\":\n self._suites = data_services_suites\n\n self._dict_value = [value for value in self._suites.values()]\n self._dict_keys = [value for value in self._suites.keys()]\n print(self._dict_value)\n print(self._dict_keys)\n for host in self._dict_value:\n try:\n search = resource_page.search_host()\n self.driver.execute_script(\"arguments[0].click()\", search)\n #search.click()\n except ElementClickInterceptedException:\n search = resource_page.search_host()\n search.click()\n time.sleep(2)\n self._logger.info(f\"We are adding {host} in resource tab of Adios Page\")\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusResourcesPage.host_name)))\n try:\n host_name = resource_page.enter_host_name()\n host_name.send_keys(host)\n except NoSuchElementException as ex:\n self.driver.get_screenshot_as_file(\"Host_error.png\")\n host_name = resource_page.enter_host_name()\n host_name.send_keys(host)\n\n confirm = resource_page.confirm_button()\n confirm.click()\n time.sleep(2)\n self._logger.info(f\"Successfully added {host} in resource tab of Adios Page\")\n\n def navigate_dispatch(self):\n element = self._home_adios_plus.navigate_despatch()\n self.driver.execute_script(\"arguments[0].click();\", element)\n self._logger.info(f\" Now when we have resources added. 
Will add suites to Dispatch tab\")\n\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.search)))\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.search)))\n\n despatch_page = AdiosPlusDespatch(self.driver)\n search_button = despatch_page.search_button()\n search_button.click()\n\n handles = self.driver.window_handles\n self.driver.switch_to.window(handles[0])\n self._logger.info(\"Navigated to dispatch page of Adios Page\")\n for suite in self._dict_keys:\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.clear_filter)))\n clear_button = despatch_page.clear_filter_button()\n clear_button.click()\n\n suite_name = despatch_page.enter_suite_name()\n suite_name.send_keys(suite)\n\n search_suite = despatch_page.search_suite_button()\n search_suite.click()\n\n self._logger.info(f\"Adding suite {suite} to adios Page\")\n\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.select_suite)))\n time.sleep(4)\n\n select_suite = despatch_page.select_suite_checkbox()\n select_suite[1].click()\n\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.right_arrow)))\n\n right_arrow = despatch_page.select_right_arrow()\n right_arrow.click()\n\n select_suites = despatch_page.select_all_suites()\n select_suites[2].click()\n\n add_suites = despatch_page.add_suites_button()\n add_suites.click()\n\n self._logger.info(f\"Successfully added all suites to View\")\n\n for key, value in self._suites.items():\n suite_search = despatch_page.search_suite_to_run()\n self._logger.info(f'Selected suite {key} to run on host {value}')\n suite_search.send_keys(key)\n\n try:\n add_button = despatch_page.suite_add_button()\n except ElementClickInterceptedException as ex:\n add_button = despatch_page.suite_add_button()\n add_button.click()\n\n try:\n suite_run = despatch_page.select_suite_to_despatch()\n suite_run.click()\n except ElementClickInterceptedException as ex:\n suite_run = despatch_page.select_suite_to_despatch()\n suite_run.click()\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.test_cycle)))\n\n test_cycle = despatch_page.test_cycle_input()\n\n test_cycle.send_keys(self._test_cycle)\n\n test_cycle.send_keys(Keys.ENTER)\n time.sleep(1)\n qual = despatch_page.qual_input()\n\n qual.send_keys(self._qual)\n\n qual.send_keys(Keys.ENTER)\n time.sleep(2)\n\n host = despatch_page.host_input()\n host.send_keys(value)\n host.send_keys(Keys.ENTER)\n self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.host_select_message)))\n\n message = despatch_page.get_host_message()\n message = message.text\n self._logger.info(f' Currently status of host is {message}')\n if \"Currently validating\" in message:\n time.sleep(5)\n message = despatch_page.get_host_message()\n message = message.text\n if \"ready to run\" in message:\n self._logger.info(f'Despatcher state of Host {value} is ready')\n time.sleep(8)\n box = despatch_page.select_symm()\n box.send_keys(\"OLKCK\")\n box.send_keys(Keys.ENTER)\n time.sleep(2)\n\n self.wait.until(expected_conditions.element_to_be_clickable((AdiosPlusDespatch.run)))\n run_button = despatch_page.run_button()\n run_button.click()\n\n handles = self.driver.window_handles\n self.driver.switch_to.window(handles[0])\n time.sleep(5)\n ok_button = despatch_page.ok_button_function()\n ok_button.click()\n time.sleep(20)\n 
self.wait.until(expected_conditions.presence_of_element_located((AdiosPlusDespatch.after_run_message)))\n\n self.driver.get_screenshot_as_file(f'{key}.png')\n time.sleep(30)\n\n search_field = despatch_page.search_suite_text_field()\n\n clear_suite = despatch_page.clear_search_suite()\n clear_suite.click()\n\n despatch_page.deselect_suite_link().click()\n time.sleep(4)\n\n else:\n self._logger.info(f'Despatcher state of Host {value} is not in ready state')\n print(\"host is not in ready state\")\n clear_suite = despatch_page.clear_search_suite()\n clear_suite.click()\n print(\"Clearing suite\")\n time.sleep(5)\n\n deselect_suite = despatch_page.deselect_suite_link()\n deselect_suite.click()\n time.sleep(4)\n continue\n self.driver.close()\n\n\nobj = AdiosPlusMain()\nobj.parse_commandline_args()\nobj.adios_plus_login()\nobj.navigate_resources_page()\nobj.navigate_dispatch()\n\n\n\n\n\n","repo_name":"gargprankur/Selenium_Projects","sub_path":"PageObjectAdiosPlus/AdiosPlusMain.py","file_name":"AdiosPlusMain.py","file_ext":"py","file_size_in_byte":12006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"11707904240","text":"import redis\nimport logging\nimport time\n\n\nSOCKET_TIMEOUT = 15000\nCONNECTION_MAX_ATTEMPTS = 5\n\n\ndef retry(times):\n def decorator(func):\n def wrapper(*args, **kwargs):\n attempt = 0\n while attempt < times:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n logging.exception(\"Retry of %s faild: %s\" % (func.__name__, e))\n attempt += 1\n time.sleep(1)\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\nclass Store(object):\n def __init__(self, host, port, socket_timeout=SOCKET_TIMEOUT):\n try:\n self.r = redis.Redis(host=host, port=port, socket_timeout=socket_timeout)\n except Exception as e:\n logging.exception(\"Can't init store: %s\" % e)\n\n @retry(CONNECTION_MAX_ATTEMPTS)\n def get(self, key):\n return self.r.get(key)\n\n @retry(CONNECTION_MAX_ATTEMPTS)\n def set(self, key, value, time):\n self.r.set(key, value, ex=time)\n\n def cache_get(self, key):\n try:\n return self.get(key)\n except Exception as e:\n logging.exception(\"Can't get from cache: %s\" % e)\n return None\n\n def cache_set(self, key, value, time):\n try:\n self.set(key, value, time)\n except Exception as e:\n logging.exception(\"Can't set to cache: %s\" % e)\n","repo_name":"tatiana-vakhrameeva/python-course","sub_path":"hw3-1/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"5329222844","text":"# Read in assignment input and store in string\nwith open('day3_input') as f:\n contents = f.read()\n\n# Split input lines into array\ninputArray = contents.splitlines()\npriority = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\nprioritySum = 0\n\n# Itterate through the list, split the input string in half and find the character that appears in each half\n# After identifing the common character, determine it's priority value based on the priority string above \n# the characters priority is its position in the priority string starting with a in position 1\n# Add all of the priorites together\nfor i in inputArray:\n half = int(len(i)/2)\n compartmentOne = i[0:half]\n compartmentTwo = i[half:len(i)]\n sameItem = ''\n for i in compartmentOne:\n position = -1\n position = compartmentTwo.find(i)\n if position != -1:\n sameItem = compartmentTwo[position]\n prioritySum += priority.find(sameItem)+1\n\n# Print the sum of all priorities\nprint(prioritySum)","repo_name":"davidlukemt/advent-of-code","sub_path":"2022/day3/day3_part1.py","file_name":"day3_part1.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71032475768","text":"import copy\n\nimport torch\nimport torch.nn as nn\n\n\nclass WrapLayer(nn.Module):\n def __init__(self, layer, max_seq_len):\n super().__init__()\n self.layer = layer\n self.position_ids = nn.Parameter(\n torch.arange(max_seq_len).unsqueeze(0).view(-1, max_seq_len),\n requires_grad=False,\n )\n\n def forward(self, x):\n _, seq_len, h = x.shape\n return self.layer(x, position_ids=self.position_ids[:, :seq_len])[0]\n\n\nclass CastOutputToFloat(nn.Sequential):\n def forward(self, x):\n return super().forward(x).to(torch.float32)\n\n\nclass AdapterModel(nn.Module):\n def __init__(\n self,\n pretrained_model,\n max_seq_len,\n adapter_size,\n adapter_dropout=0.1,\n init_with_existing_tokens=False,\n ): # add cast to 8bit param\n super().__init__()\n # Copy token embedding layer from the pretrained model\n vocab_size = pretrained_model.config.vocab_size\n hidden_size = pretrained_model.config.hidden_size\n self.token_emb = nn.Embedding(vocab_size, hidden_size)\n self.token_emb.weight = copy.deepcopy(pretrained_model.gpt_neox.embed_in.weight)\n self.token_emb.weight.requires_grad = False\n pretrained_model.gpt_neox.embed_in = None\n\n # Initialize adapter from normal distribution\n self.adapter = nn.Parameter(\n torch.randn(adapter_size, hidden_size) * 0.002, requires_grad=True\n )\n self.adapter_dropout = nn.Dropout(adapter_dropout)\n\n # Freeze pretrained layers\n for param in pretrained_model.parameters():\n param.requires_grad = False\n if param.ndim == 1:\n # cast the small parameters (e.g. layernorm) to fp32 for stability\n param.data = param.data.to(torch.float32)\n\n # Make transformer layers sequential\n self.transformer_layers = nn.Sequential(\n *[\n WrapLayer(layer, max_seq_len)\n for layer in pretrained_model.gpt_neox.layers\n ]\n )\n\n # Output layers -- cast final output to fp32\n self.out = nn.Sequential(\n pretrained_model.gpt_neox.final_layer_norm,\n CastOutputToFloat(pretrained_model.embed_out),\n )\n\n def forward(self, x):\n bsz, _ = x.shape\n token_emb = self.token_emb(x) # bsz, inp_len, embed_dim\n adapter_emb = self.adapter_dropout(\n self.adapter.unsqueeze(0).repeat(bsz, 1, 1)\n ) # bsz, adapter_size, embed_dim\n seq = torch.cat([adapter_emb, token_emb], dim=1)\n seq = torch.utils.checkpoint.checkpoint_sequential(\n self.transformer_layers, 3, seq\n )\n return self.out(seq)\n","repo_name":"andersonbcdefg/instruct-pythia-ptuning","sub_path":"adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"24060419146","text":"#%%\n# Dependencies and Setup\nimport requests\nimport gmaps\n\n# Import API key\nfrom config import g_key\n\n# %%\n# Set the parameters to search for a hotel in Paris.\nparams = {\n \"radius\": 5000,\n \"types\": \"lodging\",\n \"key\": g_key,\n \"location\": \"48.8566, 2.3522\"}\n# Use base URL to search for hotels in Paris.\nbase_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n# Make request and get the JSON data from the search.\nhotels = requests.get(base_url, params=params).json()\n\n# %%\n# Iterate through the DataFrame.\nfor index, row in hotel_df.iterrows():\n # Get the latitude and longitude.\n lat = row[\"Lat\"]\n lng = row[\"Lng\"]\n\n # Add the latitude and longitude to the params dictionary as values to the location key.\n params[\"location\"] = f\"{lat},{lng}\"\n\n # Use the search term: \"lodging\" and our latitude and longitude.\n base_url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json\"\n # Make request and get the JSON data from the search.\n hotels = requests.get(base_url, params=params).json()\n # Grab the first hotel from the results and store the name.\n hotel_df.loc[index, \"Hotel Name\"] = hotels[\"results\"][0][\"name\"]\n\n# %%\n","repo_name":"ebskii52/World_Weather_Analysis","sub_path":"Google_Nearby_Search.py","file_name":"Google_Nearby_Search.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39242225005","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 19 23:13:41 2020\n\n@author: Admin\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n#%%\n# Create a class for a block \nclass block(nn.Module):\n def __init__(self, in_channels, out_channels, identity_downsample = None, expansion = 1, stride = 1):\n super(block, self).__init__()\n self.expanded_outchannels = out_channels * expansion # Borrowing the idea of expansion factor from Resnet50-101-152\n \n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size = 3, stride = stride, padding = 1)\n self.bn1 = nn.BatchNorm2d(out_channels)\n \n \n self.conv2 = nn.Conv2d(out_channels, self.expanded_outchannels, kernel_size = 3, stride = 1, padding = 1)\n self.bn2 = nn.BatchNorm2d(self.expanded_outchannels)\n \n self.identity_downsample = identity_downsample\n \n def forward(self, x):\n identity = x\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n if self.identity_downsample is not None:\n identity = self.identity_downsample(identity)\n # print(\"identity.shape = \", identity.shape)\n # print(\"x.shape = \", x.shape)\n x += identity\n x = F.relu(x)\n return x\nclass ResNetCIFAR(nn.Module):\n def __init__(self,image_channels, num_classes, expansion, num_blocks_per_layer = 2):\n super(ResNetCIFAR, self).__init__()\n self.in_channels = 16 # meaning this is the first number of channels to upsample to from image_channels\n self.expansion = expansion\n self.num_blocks_per_layer = 2\n \n self.conv1 = nn.Conv2d(image_channels, 16, kernel_size = 3, stride = 1, padding = 1)\n self.bn1 = nn.BatchNorm2d(16) # \n \n # Resnet layers\n self.layerconv2 = self._make_layer(block, out_channels = 16, stride = 1)\n self.layerconv3 = self._make_layer(block, out_channels = 32, stride = 2)\n self.layerconv4 = self._make_layer(block, out_channels = 64, stride = 2)\n \n self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n self.fc = nn.Linear(64 * self.expansion, num_classes)\n def forward(self, x):\n x = F.relu(self.bn1(self.conv1(x)))\n # print(\"Output shape after conv1: \", x.shape)\n x = self.layerconv2(x)\n x = self.layerconv3(x)\n x = self.layerconv4(x)\n \n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n \n return x\n def _make_layer(self, block, out_channels, stride):\n layers = []\n # Because any time this function _make_layer is called, this resulting layer will downsample the input.\n # Hence identity_downsample is always needed\n identity_downsample = nn.Sequential(\n nn.Conv2d(self.in_channels, out_channels * self.expansion, kernel_size = 1,\n stride = stride),\n nn.BatchNorm2d(out_channels * self.expansion)\n )\n # Creating first block for this layer\n layers.append(block(in_channels = self.in_channels, out_channels = out_channels, identity_downsample = identity_downsample, \\\n expansion = self.expansion, stride = stride))\n \n self.in_channels = out_channels * self.expansion\n \n # Creating subsequent blocks for this layer\n # For subsequent blocks, the input dimensions match the output dimensions, so no identity_downsample is needed,\n # meaning only perform simple addition of the input and the output\n for i in range(self.num_blocks_per_layer - 1):\n layers.append(block(self.in_channels, out_channels, expansion = self.expansion))\n \n return nn.Sequential(*layers)\n \n# resnet = ResNetCIFAR(image_channels = 3, num_classes = 100, expansion = 3, num_blocks_per_layer = 2)\n# x = torch.randn(3, 3, 32, 32)\n# y = 
resnet(x)","repo_name":"giaphattram/ResNet-CIFAR100-ImageClassification","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"35172967962","text":"#Name: Min Jung\r\n#Due Date:12/05/2021\r\n#Final Project: Lab 2\r\n\r\nclass Triangle:\r\n def __init__(self, base1, height1, base2, height2):\r\n self.base1 =base1\r\n self.height1= height1\r\n self.base2= base2\r\n self.height2 = height2\r\n def compare_area(self):\r\n self.area1 = 0.5 * self.base1 * self.height1\r\n self.area2 = 0.5 * self.base2 * self.height2\r\n if self.area1 > self.area2:\r\n print(f'Base:{self.base1:.2f}')\r\n print(f'Height:{self.height1:.2f}')\r\n print(f'Area:{self.area1:.2f}')\r\n elif self.area1 == self.area2:\r\n print(\"Both Triangles have the same area!\")\r\n print(f'Triangle 1 Area: {self.area1:.2f}')\r\n print(f'Triangle 2 Area: {self.area2:.2f}')\r\n else:\r\n print(f'Base:{self.base2:.2f}')\r\n print(f'Height:{self.height2:.2f}')\r\n print(f'Area:{self.area2:.2f}')\r\ndef main():\r\n base1= float(input())\r\n height1= float(input())\r\n base2= float(input())\r\n height2= float(input())\r\n \r\n area = Triangle(base1,height1,base2,height2)\r\n print(\"\\nTriangle with larger area:\")\r\n area.compare_area()\r\nmain()","repo_name":"minjung1004/CS-2520","sub_path":"Final Project/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"43171645302","text":"#!/usr/bin/env python3\n\n'''\n 字段:学生姓名 班级 Linux PHP Python\n stu1 = {'id':1,'sname':'tom','bj':'1','Linux':100,}\n'''\n\nstu_info = []\nid = 0\n\ndef menu():\n print('''\n | ----------学生成绩系统--------|\n | |\n | ==========主功能菜单==========|\n | |\n | | \n | 1.录入学生成绩 |\n | 2.查询学生成绩 |\n | 3.删除学生的成绩 |\n | 4.修改学生的成绩 |\n | 5.展示所有学生成绩 |\n | 0.退出系统 |\n | |\n |-------------------------------|\n\n\n\n\n\n ''')\n\n#录入学生成绩\ndef add_info():\n while True:\n sname = input('输入学生的姓名:')\n if not sname:\n print('学生姓名不能为空')\n continue\n bj = input('输入学生的班级:')\n Linux = input('输入Linux成绩:')\n PHP = input('输入PHP成绩:')\n Python = input('输入Python成绩:')\n\n global id \n id += 1\n stu = {'id':id,'sname':sname,'bj':bj,'Linux':Linux,'PHP':PHP,'Python':Python}\n stu_info.append(stu)\n\n print(stu_info)\n key = input('是否继续录入y/n?')\n if key == 'y':\n continue\n else:\n break\n\n\n#显示所有学生的成绩\ndef show():\n '''\n 遍历列表,获取到每个学生的信息\n '''\n format_title = '{:^6}{:^12}\\t{:^12}{:^12}{:^12}{:^12}'\n format_data = '{:^6}{:^13}\\t{:^15}{:^13}{:^15}{:^14}'\n print(format_title.format('ID','姓名','班级','Linux成绩','PHP成绩','Python成绩'))\n\n for i in stu_info:\n id = i.get('id')\n sname = i.get('sname')\n bj = i.get('bj')\n Linux = i.get('Linux')\n PHP = i.get('PHP')\n Python = i.get('Python')\n print(format_data.format(id,sname,bj,Linux,PHP,Python))\n\ndef search():\n '''\n 根据名字查询学生的成绩\n\n '''\n \n format_title = '{:^6}{:^12}\\t{:^12}{:^12}{:^12}{:^12}'\n format_data = '{:^6}{:^13}\\t{:^15}{:^13}{:^15}{:^14}'\n sname = input('输入要查询学生的姓名:')\n\n print(format_title.format('ID','姓名','班级','Linux成绩','PHP成绩','Python成绩'))\n \n #提取到所有学生的名字\n name_list = [stu_info[i].get('sname') for i in range(len(stu_info))]\n if sname in name_list:\n for i in stu_info:\n if sname == i.get('sname'):\n id = i.get('id')\n sname = i.get('sanme')\n bj = i.get('bj')\n Linux = i.get('Linux')\n PHP = i.get('PHP')\n Python = i.get('Python')\n print(id,sname,bj,Linux,PHP,Python)\n else:\n print('学生名字不存在')\n\n\ndef delete():\n global id\n sname = input('请输入要删除学生得名字:')\n\n if stu_info:\n for i in stu_info:\n if i['sname'] == sname:\n stu_info.remove(i) #删除该字段\n print('删除成功')\n #修改剩下学生的id id - 1\n #for i,v in enumerate(stu_info):\n for i in range(len(stu_info)):\n id = i + 1\n stu_info[i]['id'] = id\n if not stu_info:\n id = 0\n show()\n\n#修改学生信息,只修改学生的成绩\ndef modify():\n sname = input()\n if stu_info: \n for i in stu_info:\n if key == '1':\n Linux = input('')\n i['Linux'] = Linux\n else:\n print('学生不存在')\n\n\ndef main():\n while True: \n menu()\n key = input('请选择功能:')\n if key == '1':\n add_info()\n if key == '2':\n search()\n if key == '3':\n delete()\n if key == '5':\n show()\n\n if key == '0':\n break\n\n\nmain()\n","repo_name":"sunyuanheng/000404","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"38932665298","text":"from django.http import HttpResponse\nimport datetime\nfrom django.template import Template , Context\nfrom django.template.loader import get_template\nfrom django.shortcuts import render\n\n\nclass Persona(object):\n def __init__(self,nombre,apellido,edad,direccion):\n self.nombre = nombre\n self.apellido = apellido\n self.edad = edad\n self.direccion = direccion\n\n\n\n\ndef index(request):\n\n documento = ('',\n '',\n '',\n '',\n 'MI PRIMERA PAGINA WEB DJANGO',\n '',\n '
Hola esta es mi primera pagina web con django
',\n '
BIENVENIDOS
',\n '',\n '')\n\n return HttpResponse(documento)\n\n\ndef saludo(request):\n\n\n Datos = Persona(\"CAMILA\",\"LEAL\",34,\"GUACARA\")\n\n temas = ['administracion','matematicas','ingles','idiomas','php']\n\n #variables\n nombre = 'anderson'\n apellido = 'garcia'\n fecha = datetime.datetime.now()\n\n #-------------------------------------------------------------------------------------------------------------------#\n # doc_externo = open(\"D:\\PROYECTOS DJANGO\\proyecto1\\proyecto1\\Templates\\miPlantilla.html\")\n\n #creamos objeto tipo template\n\n # plt = Template(doc_externo.read())\n\n #cerramos el doc_externo\n\n # doc_externo.close()\n\n #creamos contexto\n\n # ctx = Context({\"nombre_persona\":Datos.nombre,\"apellido_persona\":Datos.apellido,\"fecha\":fecha,\"temas\":temas})\n\n #renderizamos\n\n # documento = ctx.render(ctx)\n # -----------------------------------------------------------------------------------------------------------------#\n\n\n\n ####### Cargar templates con Settings #######\n\n # TEMPLATES = get_template('miPlantilla.html')\n\n # DOCUMENTO_TEMPLATES = TEMPLATES.render({\"nombre_persona\":Datos.nombre,\"apellido_persona\":Datos.apellido,\"fecha\":fecha,\"temas\":temas})\n\n # return HttpResponse(DOCUMENTO_TEMPLATES)\n\n ####### Cargar templates con Shortcut Render #######\n\n return render(request,\"miPlantilla.html\",{\"nombre_persona\":Datos.nombre,\"apellido_persona\":Datos.apellido,\"fecha\":fecha,\"temas\":temas})\n\n\ndef damefecha(request):\n fecha = datetime.datetime.now()\n documento = '''\n \n \n \n MI PRIMERA PAGINA WEB DJANGO\n \n
Hola esta es la fecha actual: %s
\n
BIENVENIDOS
\n '\n ''' %fecha\n\n return HttpResponse(documento)\n\n\ndef calculaedad(request , ano):\n\n edadactual = 38\n periodo = ano - 2022\n\n edadfutura = edadactual+periodo\n\n documento ='''\n \n \n \n MI PRIMERA PAGINA WEB DJANGO\n \n
En el año: %s tendras %s años
\n
BIENVENIDOS
\n '\n ''' %(ano,edadfutura)\n\n return HttpResponse(documento)\n\n\ndef calculaedadparametros(request , edad, agno):\n\n periodo = agno - 2022\n\n edadfutura = edad+periodo\n\n documento ='''\n \n \n \n MI PRIMERA PAGINA WEB DJANGO\n \n
En el año: %s tendras %s años
\n
BIENVENIDOS
\n '\n ''' %(agno,edadfutura)\n\n return HttpResponse(documento)\n\n\n\n\ndef cursoC(request):\n fecha = datetime.datetime.now()\n\n return render(request,\"CursoC.html\", {\"fecha\":fecha})\n\ndef cursoCss(request):\n\n return render(request,\"Cursocss.html\")\n\n\n\n","repo_name":"ARSYSTEMAS/CURSO-DJANGO","sub_path":"proyecto1/proyecto1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7278418357","text":"import random\r\nnum=random.randint(1,10)\r\ncondition=True\r\nwhile condition:\r\n guess=int(input('請輸入猜的數字(介於0~10之間)'))\r\n if(guess<0 or guess>10):\r\n print('請輸入猜的數字(介於0~10之間)')\r\n elif(num==guess):\r\n print('你猜對了')\r\n condition=False\r\n else:\r\n print('你猜錯了')","repo_name":"charlie14516/AE402-python","sub_path":"猜數字_2.py","file_name":"猜數字_2.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"14706245419","text":"\n#https://likegeeks.com/python-gui-examples-tkinter-tutorial/\n\nfrom tkinter import *\n\nwindow = Tk()\n\nwindow.title(\"Some guji\")\n\nwindow.geometry('350x200')\n\nlbl = Label(window,text = \"Heloo there\")#, font = (\"Arial Bold\", 50))\n\nlbl.grid(column=0,row=0)\n\ntxt = Entry(window, width = 10, state = 'normal') # disabled, readonly\ntxt.grid(column=1, row=0)\ntxt.focus()\n\ndef clicked():\n res = \"Welcome to \" + txt.get()\n lbl.configure(text = res)\n\nbtn = Button(window, text= \"Clikc me\", bg = \"green\", fg = \"grey\", command = clicked)\nbtn.grid(column=2,row=0)\n\n\n\nwindow.mainloop()\n\n\n\n###########################################################################################\n\n#Add a combobox widget\n\nfrom tkinter import *\n\nfrom tkinter.ttk import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\ncombo = Combobox(window)\n\ncombo['values'] = (1,2,3,4,5,\"Text\")\n\ncombo.current(1) # set the selected item\n\ncombo.grid(column=0, row=0)\n\nlbl = Label(window,text = combo.get())#, font = (\"Arial Bold\", 50))\n\nlbl.grid(column=1,row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add a Checkbutton widget (Tkinter checkbox)\n\nfrom tkinter import *\n\nfrom tkinter.ttk import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\nchk_state = BooleanVar()\n\nchk_state.set(True) # set check state\n\nchk = Checkbutton(window, text='Choose', var=chk_state)\n\nchk.grid(column=0, row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add radio buttons widgets\n\nfrom tkinter import *\n\nfrom tkinter.ttk import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nselected = IntVar()\n\nrad1 = Radiobutton(window, text='First', value=1, variable=selected)\n\nrad2 = Radiobutton(window, text='Second', value=2, variable=selected)\n\nrad3 = Radiobutton(window, text='Third', value=3, variable=selected)\n\n\ndef clicked():\n print(selected.get())\n\n\nbtn = Button(window, text=\"Click Me\", command=clicked)\n\nrad1.grid(column=0, row=0)\n\nrad2.grid(column=1, row=0)\n\nrad3.grid(column=2, row=0)\n\nbtn.grid(column=3, row=0)\n\nwindow.mainloop()\n\n\n\n# Create a MessageBox\n\nfrom tkinter import *\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\n\n#def clicked():\n #messagebox.showinfo('Message title', 'Message content')\n #messagebox.showwarning('Message title', 'Message content')\n #messagebox.showerror('Message title', 'Message content')\n\n\n # res = messagebox.askquestion('Message title', 'Message content')\n # res = messagebox.askyesno('Message title', 'Message content')\n # res = messagebox.askyesnocancel('Message title', 'Message content')\n # res = messagebox.askokcancel('Message title', 'Message content')\n # res = messagebox.askretrycancel('Message title', 'Message content')\n\ndef clicked():\n\n return messagebox.askyesnocancel('Message title', 'Message content')\n\nbtn = Button(window, text='Click here', command=clicked)\n\nbtn.grid(column=0, row=0)\n\nlbl = Label(window,text = clicked())#, font = (\"Arial Bold\", 50))\n\nlbl.grid(column=1,row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add a SpinBox (numbers widget)\n\nfrom tkinter import *\n\nwindow = 
Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\nspin = Spinbox(window, from_=0, to=100, width=5)\n\nspin.grid(column=0, row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Progress Bar\n\n\nfrom tkinter import *\n\nfrom tkinter.ttk import Progressbar\n\nfrom tkinter import ttk\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nwindow.geometry('350x200')\n\nstyle = ttk.Style()\n\nstyle.theme_use('default')\n\nstyle.configure(\"black.Horizontal.TProgressbar\", background='black')\n\nbar = Progressbar(window, length=200, style='black.Horizontal.TProgressbar')\n\nbar['value'] = 70\n\nbar.grid(column=0, row=0)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add a Menu bar\n\nfrom tkinter import *\n\nfrom tkinter import Menu\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\nmenu = Menu(window)\n\nnew_item = Menu(menu)\n\nnew_item.add_command(label='New')\n\nnew_item.add_separator()\n\nnew_item.add_command(label='Edit')\n\nnew_item2 = Menu(menu)\n\nnew_item2.add_command(label='New2')\n\nnew_item2.add_separator()\n\nnew_item2.add_command(label='Edit2')\n\nmenu.add_cascade(label='File', menu=new_item)\nmenu.add_cascade(label='Options', menu=new_item2)\n\nwindow.config(menu=menu)\n\nwindow.mainloop()\n\n###########################################################################################\n\n# Add Widgets\n\nfrom tkinter import *\n\nfrom tkinter import ttk\n\nwindow = Tk()\n\nwindow.title(\"Welcome to LikeGeeks app\")\n\ntab_control = ttk.Notebook(window)\n\ntab1 = ttk.Frame(tab_control)\n\ntab2 = ttk.Frame(tab_control)\n\ntab_control.add(tab1, text='First')\n\ntab_control.add(tab2, text='Second')\n\nlbl1 = Label(tab1, text='label1')\n\nlbl1.grid(column=0, row=0)\n\nlbl2 = Label(tab2, text='label2')\n\nlbl2.grid(column=0, row=0)\n\ntab_control.pack(expand=1, fill='both')\n\nwindow.mainloop()","repo_name":"macicekm/PyCourses","sub_path":"Tkinter2.py","file_name":"Tkinter2.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74933734967","text":"from sprite_object import *\nfrom npc import *\nfrom map import *\nimport random\nimport math\n\nclass ObjectHandler:\n def __init__(self, game):\n # Sprite variables\n self.npc_sprite_path = 'resources/sprites/npc/'\n self.static_sprite_path = 'resources/sprites/static_sprites/'\n self.anim_sprite_path = 'resources/sprites/animated_sprites/'\n\n self.game = game\n self.sprite_list = []\n self.npc_list = []\n self.alive_npc_list = []\n self.npc_positions = {}\n self.lastRespawned = pg.time.get_ticks()\n self.killed = 0\n\n self.gameMap = Map(game)\n self.map_size = self.gameMap.get_size()\n\n self.hpRestored = False\n self.dmgIncreased = False\n\n # Sprites on the Map\n self.add_sprite(SpriteObject(game))\n for i in range(0, self.randomNum(10, 30)):\n newX = self.randomNum(0, self.map_size[0])\n newY = self.randomNum(0, self.map_size[1])\n self.add_sprite(AnimatedSprite(game, pos=(newX - 0.5, newY - 0.5)))\n\n # Little bit more interesting spawning, but also more problematic\n print(\"MAP Size X: \" + str(self.map_size[0]) + \" | Y: \" + str(self.map_size[1]) + \" | Empty: \" + str(self.gameMap.world_empty_space))\n\n cloRangeNPC = 0\n while cloRangeNPC < 10:\n newX = self.randomNum(4, math.floor(self.map_size[0] / 4))\n newY = self.randomNum(4, math.floor(self.map_size[1] / 6))\n if not self.gameMap.isWall(newX, newY):\n self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n cloRangeNPC = cloRangeNPC + 1\n\n midRangeNPC = 0\n while midRangeNPC < 60:\n newX = self.randomNum(math.floor(self.map_size[0] / 4) + 1, math.floor(self.map_size[0] / 2))\n newY = self.randomNum(math.floor(self.map_size[1] / 6) + 1, math.floor(self.map_size[1] / 3))\n if not self.gameMap.isWall(newX, newY):\n self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n midRangeNPC = midRangeNPC + 1\n\n farRangeNPC = 0\n while farRangeNPC < 40:\n newX = self.randomNum(math.floor(self.map_size[0] / 2) + 1, self.map_size[0])\n newY = self.randomNum(math.floor(self.map_size[1] / 3) + 1, self.map_size[1])\n if not self.gameMap.isWall(newX, newY):\n self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n farRangeNPC = farRangeNPC + 1\n\n def killReward(self):\n reward = self.killed\n if reward > 15 and not self.hpRestored:\n self.game.player.set_health(200)\n self.game.sound.hpHealed.play()\n self.hpRestored = True\n\n if reward > 20 and not self.dmgIncreased:\n self.game.weapon.set_damage(100)\n self.game.sound.dmgIncrease.play()\n self.dmgIncreased = True\n\n def update(self):\n self.killReward()\n self.npc_positions = {npc.map_pos for npc in self.npc_list if npc.alive}\n [sprite.update() for sprite in self.sprite_list]\n for npc in self.npc_list:\n npc.update()\n\n for npc in self.alive_npc_list:\n if not npc.isAlive():\n self.alive_npc_list.pop(self.alive_npc_list.index(npc))\n self.killed = self.killed + 1\n\n time_now = pg.time.get_ticks()\n if time_now - self.lastRespawned > 15000:\n while True:\n newX = self.randomNum(0, self.map_size[0])\n newY = self.randomNum(0, self.map_size[1])\n if not self.gameMap.isWall(newX, newY):\n self.add_npc(NPC(self.game, pos=(float(newX + 0.5), float(newY + 0.5))))\n break\n self.lastRespawned = time_now\n\n def add_npc(self, npc):\n self.npc_list.append(npc)\n self.alive_npc_list.append(npc)\n\n def add_sprite(self, sprite):\n self.sprite_list.append(sprite)\n\n def randomNum(self, minNum, maxNum):\n return random.randint(minNum, 
maxNum)\n","repo_name":"DomasBar/FinalProjectGame","sub_path":"object_handler.py","file_name":"object_handler.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"74148229687","text":"# -*- coding: UTF-8 -*-\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\"\n 146.LRU缓存机制(review)\n acity: int\n \"\"\"\n self.hash = {}\n self.cur = capacity\n self.list = []\n\n def get(self, key):\n \"\"\"\n :type key: int\n :rtype: int\n \"\"\"\n if key in self.list:\n self.list.remove(key)\n self.list.insert(0, key)\n return self.hash.get(key)\n else:\n return -1\n\n def put(self, key, value):\n \"\"\"\n :type key: int\n :type value: int\n :rtype: void\n \"\"\"\n if self.hash.get(key):\n self.list.remove(key)\n self.list.insert(0, key)\n elif self.cur == 0: # cur减少到0的时候就开始往外弹过期的\n self.hash.pop(self.list.pop())\n self.list.insert(0, key)\n else:\n self.cur -= 1 # cur不为0就减1\n self.list.insert(0, key)\n\n self.hash[key] = value\n\n\nif __name__ == '__main__':\n cache = LRUCache(1)\n cache.put(2, 1)\n cache.put(1, 1)\n cache.put(2, 3)\n cache.put(4, 1)\n print(cache.get(1))\n","repo_name":"fuyao-w/PYAlgorithmsAndDataStructures","sub_path":"dataStructure/LRUCache.py","file_name":"LRUCache.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74947974327","text":"\"\"\"\nOpenCV resize\ncomparing various methods of resize interpolation\n\"\"\"\nfrom __future__ import print_function\nimport argparse\n\nimport os\nimport cv2\nimport numpy as np\n\n\nif __name__ == '__main__':\n # Parse\n parser = argparse.ArgumentParser(description='OpenCV resize comparison')\n parser.add_argument('--input', '-i', default=None, help='input file path')\n parser.add_argument('--output', '-o', default=None, help='output folder directory')\n args = parser.parse_args()\n\n filepath = os.path.dirname(os.path.realpath(__file__))\n\n if args.input is not None:\n photo_file_path = args.input\n else:\n photo_file_path = os.path.join(filepath, '../../assets/compare/0/photo0_xinput.jpg')\n\n if args.output is not None:\n output_dir = args.output\n else:\n output_dir = os.path.join(filepath, '../../assets/compare/0')\n\n input_img = cv2.imread(photo_file_path, cv2.IMREAD_COLOR)\n input_image_height = input_img.shape[0]\n input_image_width = input_img.shape[1]\n output_image_height = 2 * input_image_height\n output_image_width = 2 * input_image_width\n\n scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_NEAREST)\n cv2.imwrite(os.path.join(output_dir, 'nearest.jpg'), scaled_input_img)\n scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_LINEAR)\n cv2.imwrite(os.path.join(output_dir, 'linear.jpg'), scaled_input_img)\n scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_AREA)\n cv2.imwrite(os.path.join(output_dir, 'area.jpg'), scaled_input_img)\n scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(os.path.join(output_dir, 'cubic.jpg'), scaled_input_img)\n scaled_input_img = cv2.resize(input_img, (output_image_width, output_image_height), interpolation=cv2.INTER_LANCZOS4)\n cv2.imwrite(os.path.join(output_dir, 'lanczos.jpg'), scaled_input_img)\n\n\n","repo_name":"corochann/SeRanet","sub_path":"src/tools/opencv_resize.py","file_name":"opencv_resize.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"77"}
+{"seq_id":"23567931321","text":"import pandas as pd\nimport numpy as np\n\n# import requests\n# from urllib3 import request\n# import json\nimport sqlalchemy as sa\n\n# import psycopg2\n\nfrom io import StringIO\nimport csv\n\nfrom win10toast import ToastNotifier\n\nimport road_index_calculations as calc\n\ntoast = ToastNotifier()\ntoast.show_toast(\n \"SCRIPT RUNNING\",\n \"Inserting records from FulcrumApp\",\n duration=10,\n)\n\nTABLE = \"road_visual_assessment_view\"\nCREATED_TABLE = \"road_visual_assessment_created\"\n\nSCHEMA = \"assessment\"\n\nCSV = r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.csv\"\nCSV_ANCILLARY = (\n r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.csv?child=ancillary_assets\"\n)\n\nJSON = r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.json\"\n\nGEOJSON = r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.geojson\"\nGEOJSON_ANCILLARY = (\n r\"https://web.fulcrumapp.com/shares/48fc49435c5a0199.geojson?child=ancillary_assets\"\n)\n\nDB_NAME = \"wc_asset_management\"\nDB_USER = \"postgres\"\nDB_PASS = \"post@dmin100!\"\nDB_HOST = \"10.73.1.2\"\nDB_PORT = \"5436\"\n\nENGINE_URL = sa.engine.URL.create(\n \"postgresql\",\n username=DB_USER,\n password=DB_PASS,\n host=DB_HOST,\n port=DB_PORT,\n database=DB_NAME,\n)\n\nENGINE = sa.create_engine(\n ENGINE_URL\n)\n\nPROJECT = 'ODM RRAMS 2021 - 2023'\n\ndef get_int_columns():\n cols_qry = \"\"\"select column_name\n from information_schema.columns\n where table_schema = 'assessment' and table_name = 'road_visual_assessment' \n and data_type in ('integer', 'smallint', 'bigint');\"\"\"\n cols = pd.read_sql_query(cols_qry, ENGINE)\n cols = list(cols['column_name'])\n return cols\n\ndef psql_insert_copy(table, conn, keys, data_iter):\n \"\"\"\n Execute SQL statement inserting data\n\n Parameters\n ----------\n table : pandas.io.sql.SQLTable\n conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection\n keys : list of str\n Column names\n data_iter : Iterable that iterates the values to be inserted\n \"\"\"\n # gets a DBAPI connection that can provide a cursor\n dbapi_conn = conn.connection\n with dbapi_conn.cursor() as cur:\n s_buf = StringIO()\n writer = csv.writer(s_buf)\n writer.writerows(data_iter)\n s_buf.seek(0)\n\n columns = \", \".join('\"{}\"'.format(k) for k in keys)\n if table.schema:\n table_name = \"{}.{}\".format(table.schema, table.name)\n else:\n table_name = table.name\n\n sql = \"COPY {} ({}) FROM STDIN WITH CSV\".format(table_name, columns)\n cur.copy_expert(sql=sql, file=s_buf)\n\ndef main():\n try:\n\n df = pd.read_csv(CSV, low_memory=False)\n ancillary_data = pd.read_csv(CSV_ANCILLARY, low_memory=False)\n\n try:\n df.rename(columns = {'project':'project_name', 'kerbs': 'kerbs_degree'}, inplace = True)\n except:\n pass\n df['kerbs_degree'].loc[df['kerbs_degree']=='N'] = np.nan\n\n cols = pd.read_sql_query(\"select * from assessment.road_visual_assessment limit 1;\", ENGINE)\n cols = list(cols.columns)\n\n int_cols = get_int_columns()\n int_cols = [i for i in int_cols if i in df.columns]\n\n inspected = df[df[\"status\"] == \"inspected\"]\n inspected[\"segment_id\"] = inspected[\"asset_id\"]\n \n # inspected = calc.main(inspected)\n\n created = df[df[\"status\"] == \"created\"]\n created[\"segment_id\"] = created[\"fulcrum_id\"]\n\n # created = calc.main(created)\n\n inspected.drop('asset_id', axis=1, inplace=True)\n\n inspected[int_cols] = inspected[int_cols].astype(int).fillna(0)\n created[int_cols] = created[int_cols].astype(int).fillna(0)\n\n inspected = 
inspected[inspected.columns.intersection(cols)]\n created = created[created.columns.intersection(cols)]\n\n inspected.to_sql(\n TABLE,\n ENGINE,\n schema=SCHEMA,\n if_exists=\"append\",\n index=False,\n method=psql_insert_copy,\n )\n created.to_sql(\n TABLE,\n ENGINE,\n schema=SCHEMA,\n if_exists=\"append\",\n index=False,\n method=psql_insert_copy,\n )\n\n # try:\n # conn = psycopg2.connect(\n # dbname=\"asset_management_master\",\n # user=\"postgres\",\n # password=\"$admin\",\n # host=\"localhost\",\n # port=5432,\n # )\n # conn.set_session(autocommit=True)\n # cur = conn.cursor()\n # cur.callproc(\n # \"assessment.rva_indices\",\n # )\n # except (Exception, psycopg2.DatabaseError) as e:\n # print(e)\n # finally:\n # if conn is not None:\n # conn.close()\n\n # try:\n # conn = psycopg2.connect(\n # dbname=\"asset_management_master\",\n # user=\"postgres\",\n # password=\"$admin\",\n # host=\"localhost\",\n # port=5432,\n # )\n # conn.set_session(autocommit=True)\n # cur = conn.cursor()\n # cur.callproc(\n # \"assessment.rva_indices_2\",\n # )\n # except (Exception, psycopg2.DatabaseError) as e:\n # print(e)\n # finally:\n # if conn is not None:\n # conn.close()\n\n toast.show_toast(\n \"SCRIPT RAN SUCCESSFULLY\",\n \"Inserting records from FulcrumApp\",\n duration=10,\n )\n except:\n toast.show_toast(\n \"SOMETHING WENT WRONG - PLEASE CHECK IMPORT FUNCTION\",\n \"Inserting records from FulcrumApp\",\n duration=10,\n )\n\n\nif __name__ == '__main__':\n main()","repo_name":"brandtosaurus/inspections","sub_path":"import_inspection_from_fulcrum.py","file_name":"import_inspection_from_fulcrum.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"31142554303","text":"from django.apps import apps\nfrom django.shortcuts import render, redirect\nfrom django.views import View\n\n\nclass BaseGenericView(View):\n template_name = None\n object_name = None\n model_class = None\n context = None\n values = None\n\n def get_for_single(self, model_class, id_object):\n return model_class.objects.get(id=id_object)\n\n def get(self, request, *args, **kwargs):\n method = kwargs.get('method', 'get')\n self.object_name = kwargs['object_name']\n self.model_class = apps.get_model(app_label='app', model_name=self.object_name)\n self.context = self.model_class.get_context() if hasattr(self.model_class, 'get_context') else {}\n if kwargs['pk'] is not None:\n self.context['object'] = self.get_for_single(self.model_class, kwargs['pk'])\n if method == 'list':\n self.context['object_list'] = self.get_for_list()\n self.get_template_name(method)\n return render(request, self.template_name, self.context)\n\n def get_for_list(self):\n return self.model_class.objects.all()\n\n def get_template_name(self, method):\n if method == 'delete':\n post_fix = '_delete.html'\n elif method == 'list':\n post_fix = '_list.html'\n else:\n post_fix = '_form.html'\n\n self.template_name = 'app/' + self.object_name.lower() + post_fix\n\n def post(self, request, *args, **kwargs):\n object_name = kwargs['object_name']\n self.model_class = apps.get_model(app_label='app', model_name=object_name)\n if kwargs['pk'] is None:\n self.set_values(request)\n self.create(request, *args, **kwargs)\n elif request.POST.get('delete'):\n self.delete(*args, **kwargs)\n else:\n self.set_values(request)\n self.edit(request, *args, **kwargs)\n return redirect('detail', method='list', object_name=object_name)\n\n def create(self, request, *args, **kwargs):\n model_object = self.model_class(**self.values)\n model_object.save()\n\n def edit(self, request, *args, **kwargs):\n model_object = self.model_class.objects.filter(id=kwargs['pk'])\n model_object.update(**self.values)\n \n def delete(self, *args, **kwargs):\n model_object = self.model_class.objects.get(id=kwargs['pk'])\n model_object.delete()\n\n def set_values(self, request):\n post_items = dict(request.POST)\n fields = [field.name for field in self.model_class._meta.get_fields()]\n self.values = {field: request.POST[field] for field in fields\n if field != 'id' if field in post_items.keys()}\n","repo_name":"Karolucha/pmb","sub_path":"adminek/views/generic_views.py","file_name":"generic_views.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"6484559014","text":"# -*- coding: utf-8 -*-\nfrom simple_perms import PermissionLogic, register\n\nfrom helpers.mixins import BasicPermissionLogicMixin\n\n\nclass TagPermissionLogic(BasicPermissionLogicMixin, PermissionLogic):\n def view(self, user, tag, *args):\n \"\"\"\n Permissions for viewing/editing Tag\n \"\"\"\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return tag.owning_group == user.group\n\n return self.admin_permission(user, tag, *args)\n\n def create(self, user, tag, *args):\n \"\"\"\n Permissions for creating Tag\n \"\"\"\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager or user.is_advisor:\n if tag.owning_group.pk == user.group.pk:\n return True\n\n return self.admin_permission(user, tag, *args)\n\n change = view\n delete = view\n\n\nregister(\"tag\", TagPermissionLogic)\nregister(\"fac/tag\", TagPermissionLogic)\n","repo_name":"alexandrenorman/mixeur","sub_path":"fac/perms/tag_perm.py","file_name":"tag_perm.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"36704320289","text":"from typing import List\n\n\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n left, right = 0, len(s) - 1\n while left < right:\n while left < right and not s[left].isalnum():\n left += 1\n while left < right and not s[right].isalnum():\n right -= 1\n if s[left].upper() != s[right].upper():\n return False\n left += 1\n right -= 1\n return True\n\n\nif __name__ == \"__main__\":\n S = Solution()\n print(S.isPalindrome(\"A man, a plan, a canal: Panama\"))\n","repo_name":"JasmineRain/Algorithm","sub_path":"Python/Double Pointers/125_Easy_验证回文串.py","file_name":"125_Easy_验证回文串.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"15224525144","text":"\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\n\nfrom uuid import uuid4\nimport tinys3\nimport os\n\nAWS_ACCESS_KEY_ID = 'AKIAJOAPGZD2SYWXWBWQ'\nAWS_SECRET_ACCESS_KEY = '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'\n\n\ndef key_name_for_path(p):\n s = str(uuid4())\n basename = os.path.basename(p)\n if '.' in basename:\n s += '.' + basename.split('.')[-1]\n return s\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('image_path', type=str)\n\n def handle(self, *args, **options):\n image_path = options['image_path']\n key_name = key_name_for_path(image_path)\n print('Uploading \"%s\" to \"%s\" on S3...' % (image_path, key_name))\n\n conn = tinys3.Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, default_bucket='siphon-assets')\n with open(image_path, 'rb') as fp:\n result = conn.upload(key_name, fp, public=True)\n\n print('Done.')\n print('\\n--> %s' % result.url)\n","repo_name":"siphoncode/siphon-web","sub_path":"siphon/web/apps/core/management/commands/image_to_s3.py","file_name":"image_to_s3.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"40749440139","text":"from typing import Optional\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def reorderList(self, head: Optional[ListNode]) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n mid = self.findMiddle(head)\n left = head\n right = mid.next\n mid.next = None\n right = self.reverseList(right)\n return self.mergeTwoLists(left, right)\n \n def findMiddle(self, head):\n slow = fast = head\n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n \n def reverseList(self, head):\n cur = None\n while head:\n new_head = head.next\n head.next = cur\n cur = head\n head = new_head\n return cur\n \n def mergeTwoLists(self, left, right):\n head = ListNode()\n cur = head\n while left and right:\n new_left, new_right = left.next, right.next\n cur.next = left\n cur.next.next = right\n cur = right\n left, right = new_left, new_right\n if left:\n cur.next = left\n if right:\n cur.next = right\n return head.next","repo_name":"zt5rice/LC-archive","sub_path":"0143. Reorder List/143.Reorder-List.py","file_name":"143.Reorder-List.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"74581835127","text":"'''\nLeia um caractere maiúsculo, que indica uma operação que deve ser realizada e uma matriz M[12][12].\nEm seguida, calcule e mostre a soma ou a média considerando somente aqueles elementos que estão na \nárea superior da matriz, conforme ilustrado abaixo (área verde).\n'''\n\ndef criar_matriz(lin,col):\n mat = []\n for i in range(lin):\n mat.append([0] * col)\n return mat\n\ndef ler_matriz(mat,lin,col):\n for i in range(lin):\n for j in range(col):\n mat[i][j] = float(input())\n\nsoma = 0\nO = input(\"\")\nM = criar_matriz(12,12)\nler_matriz(M, 12, 12)\n\nfor i in range (5):\n for j in range (11,0,-1):\n if j > i and j-i >= 1 and i + j < 11:\n soma = soma + M[i][j]\n\nif O == \"s\" or O == \"S\":\n print(round(soma, 1))\n\nelse:\n print(round(soma/30,1))","repo_name":"weep-dev/Python","sub_path":"Exercise2/AreaSuperior.py","file_name":"AreaSuperior.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"32273118328","text":"# -*- coding: utf-8 -*-\n\n\"\"\" OpenStreetMap related tools\n\nAmong available tools:\n- Use Overpass API to query OSM database and convert JSON\nresponse to geopandas dataframe (thanks to https://github.com/yannforget/OSMxtract for inspiration !)\n\"\"\"\nimport geojson\nimport geopandas as gpd\nfrom osmnx import geocode_to_gdf\nfrom osmnx.downloader import overpass_request\n\nfrom gistools.exceptions import QlQueryError\nfrom gistools.geometry import merge\nfrom osmnx.settings import default_crs\nfrom shapely.geometry import LineString, Point, MultiLineString, Polygon, MultiPolygon, MultiPoint\n\nfrom gistools.utils.check.value import check_string\n\nGEOMETRY_CLASS = {'linestring': (LineString, MultiLineString), 'polygon': (Polygon, MultiPolygon),\n 'point': (Point, MultiPoint)}\n\n\ndef _to_point_features(json):\n \"\"\" Read json response and extract point geometries\n\n :param json: JSON response from overpass API\n :return: GeoJSON FeatureCollection\n \"\"\"\n features = []\n elements = [e for e in json['elements'] if e['type'] == 'node']\n for elem in elements:\n coords = [elem['lon'], elem['lat']]\n features.append(geojson.Feature(id=elem['id'], geometry=Point(coords), properties=_feature_tags(elem)))\n\n return geojson.FeatureCollection(features)\n\n\ndef _to_features(json, geometry_type):\n \"\"\" Read json response and extract (multi)linestring/polygon geometries\n\n :param json: json response\n :param geometry_type: {'linestring', 'polygon'}\n :return:\n \"\"\"\n features = []\n\n if geometry_type == 'linestring':\n elements = [e for e in json['elements'] if e['type'] == 'way' or e['type'] == 'relation']\n else:\n elements = [e for e in json['elements'] if e['type'] == 'way' or (e['type'] == 'relation' and e['tags']['type']\n in ('multipolygon', 'boundary'))]\n\n for elem in elements:\n\n if elem['type'] == 'way':\n coords = [[node['lon'], node['lat']] for node in elem['geometry']]\n try:\n geom = GEOMETRY_CLASS[geometry_type][0](coords) # LineString, Polygon\n except ValueError:\n pass\n else:\n features.append(geojson.Feature(id=elem['id'], geometry=geom, properties=_feature_tags(elem)))\n\n elif elem['type'] == 'relation':\n collection = []\n for member in elem['members']:\n if member['type'] == 'way':\n member_coords = [(node['lon'], node['lat']) for node in member['geometry']]\n collection.append(LineString(member_coords))\n geom_collection = merge(collection)\n\n if geom_collection:\n try:\n geom = GEOMETRY_CLASS[geometry_type][1]([GEOMETRY_CLASS[geometry_type][0](line) for line in\n geom_collection]) # MultiLineString, MultiPolygon\n except ValueError:\n pass\n else:\n features.append(geojson.Feature(id=elem['id'], geometry=geom, properties=_feature_tags(elem)))\n\n return geojson.FeatureCollection(features)\n\n\ndef _feature_tags(json_element):\n \"\"\" Update feature tags to set OSM ID and type\n\n :param json_element: 'elements' feature in JSON dict response\n :return:\n \"\"\"\n if 'id' not in json_element['tags'].keys():\n tags = dict(osm_id=json_element['id'], **json_element['tags'])\n else:\n tags = json_element['tags']\n\n # Add osm type to attributes\n tags.update(osm_type=json_element['type'])\n\n return tags\n\n\ndef download_osm_features(place, osm_type, tag, values=None, by_poly=True, timeout=180):\n \"\"\" Download OSM features within given place\n\n :param place: single place name query (e.g: \"London\", \"Bonn\", etc.)\n :param osm_type: OSM geometry type str ('node', 'way', 'relation')\n :param tag: OSM tag to query\n 
:param values: str/list of possible values for the provided OSM tag\n :param by_poly: if True, retrieve features within polygon's list of coordinates, otherwise use bounds\n :param timeout:\n :return:\n \"\"\"\n gdf_geometry = geocode_to_gdf(place)\n\n try:\n geometry = gdf_geometry.geometry[0]\n except AttributeError: # Empty GeoDataFrame\n return None\n\n responses = []\n\n if by_poly:\n polygon_coord_strs = get_polygons_coordinates(geometry)\n for poly_coord_str in polygon_coord_strs:\n query = ql_query(osm_type, tag, values, polygon_coord=poly_coord_str, timeout=timeout)\n responses.append(overpass_request(data={'data': query}))\n else:\n query = ql_query(osm_type, tag, values, bounds=geometry.bounds, timeout=timeout)\n responses.append(overpass_request(data={'data': query}))\n\n return responses\n\n\ndef get_polygons_coordinates(geometry):\n \"\"\"\n Extract exterior coordinates from polygon(s) to pass to OSM in a query by\n polygon. Ignore the interior (\"holes\") coordinates.\n\n Parameters\n ----------\n geometry : shapely Polygon or MultiPolygon\n the geometry to extract exterior coordinates from\n\n Returns\n -------\n polygon_coord_strs : list\n\n Note\n ----\n Function from osmnx package version 0.10 (https://github.com/gboeing/osmnx)\n \"\"\"\n\n # extract the exterior coordinates of the geometry to pass to the API later\n polygons_coords = []\n if isinstance(geometry, Polygon):\n x, y = geometry.exterior.xy\n polygons_coords.append(list(zip(x, y)))\n elif isinstance(geometry, MultiPolygon):\n for polygon in geometry:\n x, y = polygon.exterior.xy\n polygons_coords.append(list(zip(x, y)))\n else:\n raise TypeError('Geometry must be a shapely Polygon or MultiPolygon')\n\n # convert the exterior coordinates of the polygon(s) to the string format\n # the API expects\n polygon_coord_strs = []\n for coords in polygons_coords:\n s = ''\n separator = ' '\n for coord in list(coords):\n # round floating point lats and longs to 6 decimal places (ie, ~100 mm),\n # so we can hash and cache strings consistently\n s = '{}{}{:.6f}{}{:.6f}'.format(s, separator, coord[1], separator, coord[0])\n polygon_coord_strs.append(s.strip(separator))\n\n return polygon_coord_strs\n\n\ndef json_to_geodataframe(response, geometry_type):\n \"\"\" Convert JSON responses to\n\n :param response: json response\n :param geometry_type: type of geometry to extract ('point', 'linestring', 'polygon', 'multipolygon')\n :return:\n \"\"\"\n geometry_type = check_string(geometry_type, ('point', 'linestring', 'polygon'))\n\n if geometry_type == 'point':\n return gpd.GeoDataFrame.from_features(_to_point_features(response), crs=default_crs)\n else:\n return gpd.GeoDataFrame.from_features(_to_features(response, geometry_type), crs=default_crs)\n\n\ndef ql_query(osm_type, tag, values=None, bounds=None, polygon_coord=None, timeout=180):\n \"\"\" QL query (thanks to https://github.com/yannforget/OSMxtract for inspiration !)\n\n :param osm_type: OSM geometry type str {'node', 'way', 'relation', 'nwr'}\n :param tag: OSM tag to query\n :param values: str/list of possible values for the provided OSM tag\n :param bounds: geometry bounds\n :param polygon_coord: location's polygon list of coordinates\n :param timeout:\n :return:\n \"\"\"\n osm_type = check_string(osm_type, ('node', 'way', 'relation', 'nwr'))\n\n if isinstance(values, str):\n values = [values]\n\n if bounds and not polygon_coord:\n west, south, east, north = bounds\n boundary = f'({south:.6f},{west:.6f},{north:.6f},{east:.6f})'\n elif polygon_coord and not bounds:\n 
boundary = f'(poly:\"{polygon_coord}\")'\n else:\n raise QlQueryError(\"Must define either geometry bounds or polygon coordinates\")\n\n if values:\n if len(values) > 1:\n tags = f'[\"{ tag }\"~\"{ \"|\".join(values) }\"]'\n else:\n tags = f'[\"{ tag }\"=\"{ values[0] }\"]'\n else:\n tags = f'[\"{tag}\"]'\n\n return f'[out:json][timeout:{timeout}];{osm_type}{tags}{boundary};out geom;'\n","repo_name":"benjaminpillot/gis-tools","sub_path":"gistools/osm.py","file_name":"osm.py","file_ext":"py","file_size_in_byte":8207,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
+{"seq_id":"25975327922","text":"from asyncio import TimeoutError\nfrom TicTacToe import TicTacToe\n\nROUND_TIME_LIMIT = 40.0\nTOO_SLOW_MESSAGE = \"too slow darling -_-\"\nGAME_OVER_MESSAGE = \"GAME OVER\"\n\n\nasync def start_game(client, channel, user):\n game = TicTacToe()\n # user will play the game through a message and its discord-reacts.\n empty_board = str(game)\n game_message = await channel.send(empty_board)\n for square in game.board:\n await game_message.add_reaction(square)\n # start game loop\n await get_user_reaction(client, channel, user, game, game_message)\n\n\nasync def get_user_reaction(client, channel, user, game, game_message):\n # user must react within time limit.\n try:\n user_reaction, user = await client.wait_for(\n 'reaction_add',\n timeout=ROUND_TIME_LIMIT,\n check=lambda r, u: game.is_position_empty(str(r.emoji)) and u == user)\n # user took too long\n except TimeoutError:\n await channel.send(TOO_SLOW_MESSAGE)\n # user chose a react in time\n else:\n await update_game_message(client, channel, user, user_reaction, game, game_message)\n\n\nasync def update_game_message(client, channel, user, user_reaction, game, game_message):\n \"\"\"Update the original game-message with the new board.\"\"\"\n user_move = str(user_reaction.emoji)\n game.update_board(user_move=user_move)\n await game_message.edit(content=str(game))\n if not game.is_over():\n await get_user_reaction(client, channel, user, game, game_message)\n else:\n await channel.send(GAME_OVER_MESSAGE)\n","repo_name":"benji1123/002-Discord-Bot","sub_path":"TicTacToeHandler.py","file_name":"TicTacToeHandler.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37036067303","text":"\"\"\"Python Script Template.\"\"\"\nimport seaborn as sns\nfrom exps.utilities import get_exact_reps, get_exact_q_reps, get_sample_qreps\n\npalette = sns.color_palette(n_colors=10)\n\n\n\"\"\"Python Script Template.\"\"\"\nimport seaborn as sns\nfrom exps.utilities import get_exact_reps, get_exact_q_reps\n\npalette = sns.color_palette(n_colors=10)\n\n\ndef get_eta_agents(env, eta, alpha, *args, **kwargs):\n \"\"\"Return agents that need the model.\"\"\"\n agents = {\n \"ExactQREPS-0.001\": get_exact_q_reps(\n env, eta=0.001 * eta, alpha=1.0, *args, **kwargs\n ),\n \"ExactQREPS-0.01\": get_exact_q_reps(\n env, eta=0.01 * eta, alpha=1.0, *args, **kwargs\n ),\n \"ExactQREPS-0.1\": get_exact_q_reps(\n env, eta=0.1 * eta, alpha=1.0, *args, **kwargs\n ),\n \"ExactQREPS-1\": get_exact_q_reps(env, eta=eta, alpha=1.0, *args, **kwargs),\n \"ExactQREPS-10\": get_exact_q_reps(\n env, eta=10 * eta, alpha=1.0, *args, **kwargs\n ),\n }\n return agents\n\n\ndef get_linestyle(name: str):\n \"\"\"Get agent linestyle.\"\"\"\n if \"QREPS\" in name:\n return \"solid\"\n else:\n return \"dashed\"\n\n\ndef get_color(name: str):\n \"\"\"Get plot color.\"\"\"\n if \"-100\" in name:\n return palette[1]\n elif \"-10\" in name:\n return palette[2]\n elif \"-1\" in name:\n return palette[0]\n elif \"-0.1\" in name:\n return palette[3]\n elif \"-0.01\" in name:\n return palette[4]\n elif \"-0.001\" in name:\n return palette[8]\n","repo_name":"sebascuri/qreps","sub_path":"exps/effect_of_eta_on_q/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
+{"seq_id":"3441337869","text":"import os.path\n\nfrom aws_cdk.aws_s3_assets import Asset\nfrom constructs import Construct\n\nfrom aws_cdk import (\n # Duration,\n CfnOutput,\n Stack,\n aws_kms as kms,\n)\n\ndirname = os.path.dirname(__file__)\n\nclass BaseKmsStack(Stack):\n\n @property\n def kms(self):\n return self._kms\n\n # @property\n # def alias(self):\n # return self._alias\n\n def __init__(self, scope: Construct, construct_id: str, prefix_name: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n self._kms = kms.Key(\n self, f\"{prefix_name.capitalize()}KmsKey\",\n description=\"base kms key\",\n alias=\"base_kms_key\"\n )\n\n # self._alias = kms.Alias(\n # self, \"Alias\",\n # alias_name=\"base_kms_key\",\n # target_key=self._kms\n # )\n\n CfnOutput(self, f\"{prefix_name.capitalize()}KmsName\", value=self._kms.key_arn)\n # CfnOutput(self, \"BaseKmsArn\", value=self._alias.key_arn)\n","repo_name":"smarkin-repository/cdk_experiments","sub_path":"base_account_setup/base_account_setup/base_kms_stack.py","file_name":"base_kms_stack.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"34446651714","text":"\"\"\" This module contains the error-related constants and classes. \"\"\"\n\nfrom collections import defaultdict, namedtuple, MutableMapping\nfrom copy import copy\nfrom .utils import compare_paths_lt, quote_string\n\n\n\"\"\"\nError definition constants\n\nEach distinguishable error is defined as a two-value-tuple that holds\na *unique* error id as integer and the rule as string that can cause it.\nThe attributes are accessible as properties ``id`` and ``rule``.\nThe names do not contain a common prefix as they are supposed to be referenced\nwithin the module namespace, e.g. errors.CUSTOM\n\"\"\"\n\nErrorDefinition = namedtuple('cerberus_error', 'code, rule')\n\n# custom\nCUSTOM = ErrorDefinition(0x00, None)\n\n# existence\nDOCUMENT_MISSING = ErrorDefinition(0x01, None) # issues/141\nDOCUMENT_MISSING = \"document is missing\"\nREQUIRED_FIELD = ErrorDefinition(0x02, 'required')\nUNKNOWN_FIELD = ErrorDefinition(0x03, None)\nDEPENDENCIES_FIELD = ErrorDefinition(0x04, 'dependencies')\nDEPENDENCIES_FIELD_VALUE = ErrorDefinition(0x05, 'dependencies')\nEXCLUDES_FIELD = ErrorDefinition(0x06, 'excludes')\n\n# shape\nDOCUMENT_FORMAT = ErrorDefinition(0x21, None) # issues/141\nDOCUMENT_FORMAT = \"'{0}' is not a document, must be a dict\"\nEMPTY_NOT_ALLOWED = ErrorDefinition(0x22, 'empty')\nNOT_NULLABLE = ErrorDefinition(0x23, 'nullable')\nBAD_TYPE = ErrorDefinition(0x24, 'type')\nBAD_TYPE_FOR_SCHEMA = ErrorDefinition(0x25, 'schema')\nITEMS_LENGTH = ErrorDefinition(0x26, 'items')\nMIN_LENGTH = ErrorDefinition(0x27, 'minlength')\nMAX_LENGTH = ErrorDefinition(0x28, 'maxlength')\n\n\n# color\nREGEX_MISMATCH = ErrorDefinition(0x41, 'regex')\nMIN_VALUE = ErrorDefinition(0x42, 'min')\nMAX_VALUE = ErrorDefinition(0x43, 'max')\nUNALLOWED_VALUE = ErrorDefinition(0x44, 'allowed')\nUNALLOWED_VALUES = ErrorDefinition(0x45, 'allowed')\nFORBIDDEN_VALUE = ErrorDefinition(0x46, 'forbidden')\nFORBIDDEN_VALUES = ErrorDefinition(0x47, 'forbidden')\n\n# other\nNORMALIZATION = ErrorDefinition(0x60, None)\nCOERCION_FAILED = ErrorDefinition(0x61, 'coerce')\nRENAMING_FAILED = ErrorDefinition(0x62, 'rename_handler')\nREADONLY_FIELD = ErrorDefinition(0x63, 'readonly')\nSETTING_DEFAULT_FAILED = ErrorDefinition(0x64, 'default_setter')\n\n# groups\nERROR_GROUP = ErrorDefinition(0x80, None)\nMAPPING_SCHEMA = ErrorDefinition(0x81, 'schema')\nSEQUENCE_SCHEMA = ErrorDefinition(0x82, 'schema')\nKEYSCHEMA = ErrorDefinition(0x83, 'keyschema')\nVALUESCHEMA = ErrorDefinition(0x84, 'valueschema')\nBAD_ITEMS = ErrorDefinition(0x8f, 'items')\n\nLOGICAL = ErrorDefinition(0x90, None)\nNONEOF = ErrorDefinition(0x91, 'noneof')\nONEOF = ErrorDefinition(0x92, 'oneof')\nANYOF = ErrorDefinition(0x93, 'anyof')\nALLOF = ErrorDefinition(0x94, 'allof')\n\n\n\"\"\" SchemaError messages \"\"\"\n\nSCHEMA_ERROR_DEFINITION_TYPE = \\\n \"schema definition for field '{0}' must be a dict\"\nSCHEMA_ERROR_MISSING = \"validation schema missing\"\n\n\n\"\"\" Error representations \"\"\"\n\n\nclass ValidationError:\n \"\"\" A simple class to store and query basic error information. \"\"\"\n def __init__(self, document_path, schema_path, code, rule, constraint,\n value, info):\n self.document_path = document_path\n \"\"\" The path to the field within the document that caused the error.\n Type: :class:`tuple` \"\"\"\n self.schema_path = schema_path\n \"\"\" The path to the rule within the schema that caused the error.\n Type: :class:`tuple` \"\"\"\n self.code = code\n \"\"\" The error's identifier code. 
Type: :class:`int` \"\"\"\n self.rule = rule\n \"\"\" The rule that failed. Type: `string` \"\"\"\n self.constraint = constraint\n \"\"\" The constraint that failed. \"\"\"\n self.value = value\n \"\"\" The value that failed. \"\"\"\n self.info = info\n \"\"\" May hold additional information about the error.\n Type: :class:`tuple` \"\"\"\n\n def __eq__(self, other):\n \"\"\" Assumes the errors relate to the same document and schema. \"\"\"\n return hash(self) == hash(other)\n\n def __hash__(self):\n \"\"\" Expects that all other properties are transitively determined. \"\"\"\n return hash(self.document_path) ^ hash(self.schema_path) \\\n ^ hash(self.code)\n\n def __lt__(self, other):\n if self.document_path != other.document_path:\n return compare_paths_lt(self.document_path, other.document_path)\n else:\n return compare_paths_lt(self.schema_path, other.schema_path)\n\n def __repr__(self):\n return \"{class_name} @ {memptr} ( \" \\\n \"document_path={document_path},\" \\\n \"schema_path={schema_path},\" \\\n \"code={code},\" \\\n \"constraint={constraint},\" \\\n \"value={value},\" \\\n \"info={info} )\"\\\n .format(class_name=self.__class__.__name__, memptr=hex(id(self)), # noqa\n document_path=self.document_path,\n schema_path=self.schema_path,\n code=hex(self.code),\n constraint=quote_string(self.constraint),\n value=quote_string(self.value),\n info=self.info)\n\n @property\n def child_errors(self):\n \"\"\"\n A list that contains the individual errors of a bulk validation error.\n \"\"\"\n return self.info[0] if self.is_group_error else None\n\n @property\n def definitions_errors(self):\n \"\"\" Dictionary with errors of an *of-rule mapped to the index of the\n definition it occurred in. Returns :obj:`None` if not applicable.\n \"\"\"\n if not self.is_logic_error:\n return None\n\n result = defaultdict(list)\n for error in self.child_errors:\n i = error.schema_path[len(self.schema_path)]\n result[i].append(error)\n return result\n\n @property\n def is_group_error(self):\n \"\"\" ``True`` for errors of bulk validations. \"\"\"\n return bool(self.code & ERROR_GROUP.code)\n\n @property\n def is_logic_error(self):\n \"\"\" ``True`` for validation errors against different schemas with\n *of-rules. \"\"\"\n return bool(self.code & LOGICAL.code - ERROR_GROUP.code)\n\n @property\n def is_normalization_error(self):\n \"\"\" ``True`` for normalization errors. \"\"\"\n return bool(self.code & NORMALIZATION.code)\n\n\nclass ErrorList(list):\n \"\"\" A list for :class:`~cerberus.errrors.ValidationError` instances that\n can be queried with the ``in`` keyword for a particular error code. 
\"\"\"\n def __contains__(self, error_definition):\n for code in (x.code for x in self):\n if code == error_definition.code:\n return True\n return False\n\n\nclass ErrorTreeNode(MutableMapping):\n __slots__ = ('descendants', 'errors', 'parent_node', 'path', 'tree_root')\n\n def __init__(self, path, parent_node):\n self.parent_node = parent_node\n self.tree_root = self.parent_node.tree_root\n self.path = path[:len(self.parent_node.path) + 1]\n self.errors = ErrorList()\n self.descendants = {}\n\n def __add__(self, error):\n self.add(error)\n return self\n\n def __delitem__(self, key):\n del self.descendants[key]\n\n def __iter__(self):\n return iter(self.errors)\n\n def __getitem__(self, item):\n return self.descendants.get(item)\n\n def __len__(self):\n return len(self.errors)\n\n def __setitem__(self, key, value):\n self.descendants[key] = value\n\n def __str__(self):\n return str(self.errors) + ',' + str(self.descendants)\n\n @property\n def depth(self):\n return len(self.path)\n\n @property\n def tree_type(self):\n return self.tree_root.tree_type\n\n def add(self, error):\n error_path = self._path_of_(error)\n\n key = error_path[self.depth]\n if key not in self.descendants:\n self[key] = ErrorTreeNode(error_path, self)\n\n if len(error_path) == self.depth + 1:\n self[key].errors.append(error)\n self[key].errors.sort()\n if error.is_group_error:\n for child_error in error.info[0]:\n self.tree_root += child_error\n else:\n self[key] += error\n\n def _path_of_(self, error):\n return getattr(error, self.tree_type + '_path')\n\n\nclass ErrorTree(ErrorTreeNode):\n \"\"\" Base class for :class:`~cerberus.errors.DocumentErrorTree` and\n :class:`~cerberus.errors.SchemaErrorTree`. \"\"\"\n def __init__(self, errors=[]):\n self.parent_node = None\n self.tree_root = self\n self.path = ()\n self.errors = []\n self.descendants = {}\n for error in errors:\n self += error\n\n def add(self, error):\n \"\"\" Add an error to the tree.\n\n :param error: :class:`~cerberus.errors.ValidationError`\n \"\"\"\n if not self._path_of_(error):\n self.errors.append(error)\n self.errors.sort()\n else:\n super(ErrorTree, self).add(error)\n\n def fetch_errors_from(self, path):\n \"\"\" Returns all errors for a particular path.\n\n :param path: :class:`tuple` of :term:`hashable` s.\n :rtype: :class:`~cerberus.errors.ErrorList`\n \"\"\"\n node = self.fetch_node_from(path)\n if node is not None:\n return node.errors\n else:\n return ErrorList()\n\n def fetch_node_from(self, path):\n \"\"\" Returns a node for a path.\n\n :param path: Tuple of :term:`hashable` s.\n :rtype: :class:`~cerberus.errors.ErrorTreeNode` or :obj:`None`\n \"\"\"\n context = self\n for key in path:\n context = context[key]\n if context is None:\n return None\n return context\n\n\nclass DocumentErrorTree(ErrorTree):\n \"\"\" Implements a dict-like class to query errors by indexes following the\n structure of a validated document. \"\"\"\n tree_type = 'document'\n\n\nclass SchemaErrorTree(ErrorTree):\n \"\"\" Implements a dict-like class to query errors by indexes following the\n structure of the used schema. \"\"\"\n tree_type = 'schema'\n\n\nclass BaseErrorHandler:\n \"\"\" Base class for all error handlers.\n Subclasses are identified as error-handlers with an instance-test. \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\" Optionally initialize a new instance. 
\"\"\"\n pass\n\n def __call__(self, errors):\n \"\"\" Returns errors in a handler-specific format.\n\n :param errors: An object containing the errors.\n :type errors: :term:`iterable` of\n :class:`~cerberus.errors.ValidationError` instances or a\n :class:`~cerberus.Validator` instance\n \"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n \"\"\" Be a superhero and implement an iterator over errors. \"\"\"\n raise NotImplementedError\n\n def add(self, error):\n \"\"\" Add an error to the errors' container object of a handler.\n\n :param error: The error to add.\n :type error: :class:`~cerberus.errors.ValidationError`\n \"\"\"\n raise NotImplementedError\n\n def emit(self, error):\n \"\"\" Optionally emits an error in the handler's format to a stream.\n Or light a LED, or even shut down a power plant.\n\n :param error: The error to emit.\n :type error: :class:`~cerberus.errors.ValidationError`\n \"\"\"\n pass\n\n def end(self, validator):\n \"\"\" Gets called when a validation ends.\n\n :param validator: The calling validator.\n :type validator: :class:`~cerberus.Validator` \"\"\"\n pass\n\n def extend(self, errors):\n \"\"\" Adds all errors to the handler's container object.\n\n :param errors: The errors to add.\n :type errors: :term:`iterable` of\n :class:`~cerberus.errors.ValidationError` instances\n \"\"\"\n for error in errors:\n self.add(error)\n\n def start(self, validator):\n \"\"\" Gets called when a validation starts.\n\n :param validator: The calling validator.\n :type validator: :class:`~cerberus.Validator`\n \"\"\"\n pass\n\n\nclass ToyErrorHandler(BaseErrorHandler):\n def __call__(self, *args, **kwargs):\n raise RuntimeError('This is not supposed to happen.')\n\n def clear(self):\n pass\n\n\nclass BasicErrorHandler(BaseErrorHandler):\n \"\"\" Models cerberus' legacy. Returns a :class:`dict`. 
\"\"\"\n messages = {0x00: \"{0}\",\n\n 0x01: \"document is missing\",\n 0x02: \"required field\",\n 0x03: \"unknown field\",\n 0x04: \"field '{0}' is required\",\n 0x05: \"depends on these values: {constraint}\",\n 0x06: \"{0} must not be present with '{field}'\",\n\n 0x21: \"'{0}' is not a document, must be a dict\",\n 0x22: \"empty values not allowed\",\n 0x23: \"null value not allowed\",\n 0x24: \"must be of {constraint} type\",\n 0x25: \"must be of dict type\",\n 0x26: \"length of list should be {constraint}, it is {0}\",\n 0x27: \"min length is {constraint}\",\n 0x28: \"max length is {constraint}\",\n\n 0x41: \"value does not match regex '{constraint}'\",\n 0x42: \"min value is {constraint}\",\n 0x43: \"max value is {constraint}\",\n 0x44: \"unallowed value {value}\",\n 0x45: \"unallowed values {0}\",\n 0x46: \"unallowed value {value}\",\n 0x47: \"unallowed values {0}\",\n\n 0x61: \"field '{field}' cannot be coerced: {0}\",\n 0x62: \"field '{field}' cannot be renamed: {0}\",\n 0x63: \"field is read-only\",\n 0x64: \"default value for '{field}' cannot be set: {0}\",\n\n 0x81: \"mapping doesn't validate subschema: {0}\",\n 0x82: \"one or more sequence-items don't validate: {0}\",\n 0x83: \"one or more keys of a mapping don't validate: \"\n \"{0}\",\n 0x84: \"one or more values in a mapping don't validate: {0}\",\n 0x85: \"one or more sequence-items don't validate: {0}\",\n\n 0x91: \"one or more definitions validate\",\n 0x92: \"none or more than one rule validate\",\n 0x93: \"no definitions validate\",\n 0x94: \"one or more definitions don't validate\"\n }\n\n def __init__(self, tree=None):\n self.tree = {} if tree is None else tree\n\n def __call__(self, errors=None):\n if errors is not None:\n self.clear()\n self.extend(errors)\n return self.tree\n\n def add(self, error):\n if error.code not in self.messages and not error.is_group_error:\n return\n elif error.is_group_error:\n self.insert_group_error(error)\n else:\n field = error.document_path[-1] if error.document_path else None\n self.insert_error(error.document_path,\n self.format_message(field, error))\n\n def clear(self):\n self.tree = {}\n\n def format_message(self, field, error):\n return self.messages[error.code]\\\n .format(*error.info, constraint=error.constraint,\n field=field, value=error.value)\n\n def insert_error(self, path, node):\n \"\"\" Adds an error or sub-tree to :attr:tree.\n\n :param path: Path to the error.\n :type path: Tuple of strings and integers.\n :param node: An error message or a sub-tree.\n :type node: String or dictionary.\n \"\"\"\n field = path[0]\n if len(path) == 1:\n if field in self.tree:\n self.tree[field].append(node)\n else:\n self.tree[field] = [node]\n elif len(path) >= 1:\n if field not in self.tree:\n self.tree[field] = [{}]\n subtree = self.tree[field][-1]\n\n if subtree:\n new = self.__class__(tree=copy(subtree))\n else:\n new = self.__class__()\n new.insert_error(path[1:], node)\n subtree.update(new.tree)\n\n def insert_group_error(self, error):\n if error.is_logic_error:\n self.insert_logic_error(error)\n\n for error in error.child_errors:\n if error.is_group_error:\n self.insert_group_error(error)\n else:\n field = error.document_path[-1] if error.document_path else None\n self.insert_error(error.document_path,\n self.format_message(field, error))\n\n def insert_logic_error(self, error):\n path = error.document_path + (error.rule, )\n self.insert_error(path, self.format_message(None, error))\n for i in error.definitions_errors:\n for child_error in error.definitions_errors[i]:\n 
field = child_error.document_path[-1]\n path = child_error.document_path[:-1] + \\\n ('definition %s' % i, field)\n self.insert_error(path, self.format_message(field, child_error)) # noqa\n\n def start(self, validator):\n self.clear()\n\n\nclass SchemaErrorHandler(BasicErrorHandler):\n messages = BasicErrorHandler.messages.copy()\n messages[0x03] = \"unknown rule\"\n","repo_name":"dfci/matchminer-api","sub_path":"cerberus1/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":17351,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"77"}
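# ---- editor's note (added; not part of the dataset record above) ----------
# A minimal sketch of how the bit-coded error ids in cerberus1/errors.py are
# meant to be read: the 0x80 bit marks group errors and, per the expression in
# ValidationError.is_logic_error, `code & (LOGICAL.code - ERROR_GROUP.code)`
# (i.e. `code & 0x10`) marks *of-rule logic errors. The constants below mirror
# SEQUENCE_SCHEMA (0x82) and NONEOF (0x91) from the module.
SEQUENCE_SCHEMA_CODE = 0x82   # a group error (schema rule on a sequence)
NONEOF_CODE = 0x91            # a logic error (noneof rule)

assert SEQUENCE_SCHEMA_CODE & 0x80                 # group bit set
assert not SEQUENCE_SCHEMA_CODE & 0x10             # logic bit clear
assert NONEOF_CODE & 0x80 and NONEOF_CODE & 0x10   # group and logic bits set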
+{"seq_id":"35340950350","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_django-table-select-widget\n------------\n\nTests for `django-table-select-widget` models module.\n\"\"\"\n\nimport django\nfrom django import forms\nfrom django.test import TestCase\n\nfrom model_mommy import mommy\n\nfrom table_select_widget import TableSelectMultiple\n\nfrom .models import Choice\n\n\nclass TestTableSelectWidget(TestCase):\n def setUp(self):\n mommy.make(\n \"Choice\",\n name=\"Choice 1\",\n description=\"Choice 1 description\",\n choice_type__name=\"Foo type\",\n )\n\n maxDiff = None\n\n def test_widget(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=[\n 'name',\n 'description',\n ],\n ),\n )\n render = ChoiceForm().as_p()\n self.assertHTMLEqual(\n '
<table>'\n # NOTE (editor): the expected-markup literal for this assertion was\n # garbled during extraction; the fragments below are a best-effort\n # reconstruction of the table the widget renders (a header row with a\n # checkbox column, \"Name\" and \"Description\", and one body row for\n # \"Choice 1\").\n '<thead><tr>'\n '<th></th>'\n '<th>Name</th>'\n '<th>Description</th>'\n '</tr></thead>'\n '<tbody><tr>'\n '<td><input type=\"checkbox\" name=\"choice_field\" value=\"1\" {} /></td>'\n '<td>Choice 1</td>'\n '<td>Choice 1 description</td>'\n '</tr></tbody>'\n '</table>
'.format(\"required\" if django.VERSION > (1,10,0) else \"\"),\n render,\n )\n\n def test_widget_datatables(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n enable_datatables=True,\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"$('#choice_field').DataTable({\" in render)\n\n def test_widget_datatables_options(self):\n \"\"\" Test setting additional options \"\"\"\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n enable_datatables=True,\n datatable_options={\n 'language': {'url': 'foo.js'},\n },\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue('\"language\": {\"url\": \"foo.js\"}' in render)\n\n def test_widget_bootstrap(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n bootstrap_style=True,\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"form-check-input\" in render)\n self.assertTrue(\"table table-sm table-bordered\" in render)\n\n def test_widget_shift_select(self):\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['name'],\n enable_shift_select=True,\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"$.fn.shiftClick = function () {\" in render)\n\n def test_widget_related(self):\n \"\"\" Test, that function on related field is called \"\"\"\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=[('choice_type__get_name', 'Type')],\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"
<th>Type</th>\" in render)  # NOTE (editor): markup reconstructed; the original literal was garbled in extraction\n self.assertTrue(\"<td>Type: Foo type</td>\" in render)  # NOTE (editor): markup reconstructed\n\n def test_widget_none(self):\n \"\"\" If the value of a variable is null, render it as a blank cell \"\"\"\n mommy.make(\n \"Choice\",\n name=\"Choice 2\",\n description=None,\n )\n class ChoiceForm(forms.Form):\n choice_field = forms.ModelMultipleChoiceField(\n queryset=Choice.objects.all(),\n widget=TableSelectMultiple(\n item_attrs=['description'],\n ),\n )\n render = ChoiceForm().as_p()\n self.assertTrue(\"<td></td>\" in render)  # NOTE (editor): reconstructed empty cell\n","repo_name":"willardmr/DjangoTableSelectMultipleWidget","sub_path":"tests/test_widget.py","file_name":"test_widget.py","file_ext":"py","file_size_in_byte":5528,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
+{"seq_id":"39554429093","text":"import gc\nimport logging\nimport math\nimport sys\nfrom concurrent import futures\nfrom functools import partial\n\nimport boto3\nimport numpy as np\nimport rasterio\nfrom PIL import Image, ImageDraw\nfrom sat_giffer import settings\nfrom rasterio import transform\nfrom rasterio.session import AWSSession\nfrom rasterio.vrt import WarpedVRT\nfrom rasterio.warp import calculate_default_transform, Resampling\n\nsession = rasterio.Env(\n AWSSession(aws_access_key_id=settings.AWS_KEY, aws_secret_access_key=settings.AWS_SECRET)) if 'test' not in \\\n sys.argv[0] else None\nMAX_WORKERS = 2\n\n\ndef get_cropped_data_from_bucket(band, key, bounds, vrt_params, out_crs):\n \"\"\"\n Recovered the data for a given band for a given scene\n :param band: Number of the band of interest\n :param key: Tile location on AWS\n :param bounds: bounding box of the area of interest\n :param vrt_params: meta dictionary for resulting fle\n :param out_crs: output coordinate system\n :return: the cropped data from the band for the bounds\n \"\"\"\n f = key + 'B0%s.jp2' % band\n with session:\n with rasterio.open(f) as src:\n logging.info('Getting data for file {f}'.format(f=f))\n vrt_transform, vrt_width, vrt_height = get_vrt_transform(src, bounds, bounds_crs=out_crs)\n vrt_width = round(vrt_width)\n vrt_height = round(vrt_height)\n vrt_params.update(\n dict(transform=vrt_transform, width=vrt_width, height=vrt_height)\n )\n with WarpedVRT(src, **vrt_params) as vrt:\n logging.info('Getting data from bucket for {f}'.format(f=f))\n data = vrt.read(\n out_shape=(1, vrt_height, vrt_width),\n resampling=Resampling.bilinear,\n indexes=[1],\n )\n gc.collect()\n return data\n\n\ndef rgb_for_key(key, bounds=None, vrt_params=None, out_crs=None):\n \"\"\"\n Loops over Blue, Green and Red Sentinel bands to build a color image\n :param key:\n :param bounds:\n :param vrt_paramsdjango-extensions==2.1.5:\n :param out_crs:\n :return:\n \"\"\"\n bands = ['2', '3', '4']\n logging.info('Getting data for key {key}'.format(key=key))\n _worker = partial(get_cropped_data_from_bucket, key=key, bounds=bounds, vrt_params=vrt_params, out_crs=out_crs)\n with futures.ProcessPoolExecutor(max_workers=3) as executor:\n try:\n data = np.concatenate(list(executor.map(_worker, bands)))\n except:\n return\n gc.collect()\n reshaped_data = np.zeros((data.shape[1], data.shape[2], data.shape[0]))\n for i in range(3):\n reshaped_data[:, :, abs(i - 2)] = data[i, :, :]\n return reshaped_data\n\n\ndef get_vrt_transform(src, bounds, bounds_crs='epsg:3857'):\n \"\"\"Calculate VRT transform.\n Attributes\n ----------\n src : rasterio.io.DatasetReader\n Rasterio io.DatasetReader object\n bounds : list\n Bounds (left, bottom, right, top)\n bounds_crs : str\n Coordinate reference system string (default \"epsg:3857\")\n Returns\n -------\n vrt_transform: Affine\n Output affine transformation matrix\n vrt_width, vrt_height: int\n Output dimensions\n \"\"\"\n dst_transform, _, _ = calculate_default_transform(src.crs,\n bounds_crs,\n src.width,\n src.height,\n *src.bounds)\n w, s, e, n = bounds\n vrt_width = math.ceil((e - w) / dst_transform.a)\n vrt_height = math.ceil((s - n) / dst_transform.e)\n\n vrt_transform = transform.from_bounds(w, s, e, n, vrt_width, vrt_height)\n\n return vrt_transform, vrt_width, vrt_height\n\n\ndef get_utm_srid(lat, lon):\n \"\"\"\n Calculate which utm zone the AOI should fall into\n :param lat: Latitude in WGS84\n :param lon: Longitude in WGS84\n :return: Integer EPSG code\n \"\"\"\n return int(32700 - 
round((45 + lat) / 90, 0) * 100 + round((183 + lon) / 6, 0))\n\n\ndef make_gif(keys, data, toa):\n \"\"\"\n Combine the data into a single array\n :param keys: Location of the tiles on AWS\n :param data: The image arrays\n :param toa: toa True/False\n :return: Data with dates embedded on the gif\n \"\"\"\n drawn = []\n for fn, i in zip(keys, data):\n if i is None:\n continue\n if len(np.where(i[:, :, 2] == 0)[0]) > i[:, :, 2].size * 0.8:\n continue\n if len(np.where(i[:, :, 2] > 2000)[0]) < i[:, :, 2].size * 0.2:\n i = np.hstack((np.zeros((i.shape[0], 100, 3)), i))\n im = Image.fromarray(np.clip((i * 255 / 2000), 0, 255).astype(np.uint8))\n draw = ImageDraw.Draw(im)\n if toa:\n draw.text((20, 50), '%s' % '-'.join(fn.split('/')[-5:-2]), fill=(255, 255, 255, 255))\n else:\n draw.text((20, 50), '%s' % '-'.join(fn.split('/')[-6:-3]), fill=(255, 255, 255, 255))\n drawn.append(np.array(im))\n return drawn\n\n\ndef upload_file_to_s3(body):\n \"\"\"\n Uploads a given file to s3\n :param body: filename\n :return: None\n \"\"\"\n s3_client = boto3.Session(settings.AWS_KEY, settings.AWS_SECRET).client('s3', region_name='eu-central-1')\n s3_client.upload_file(Filename='gifs/%s.gif' % body, Bucket='sat-giffer', Key='gifs/%s.gif' % body,\n ExtraArgs={'ACL': 'public-read'})\n\n\ndef get_s3_urls(first_tile, search_results, toa):\n \"\"\"\n Get a filtered list of S3 URIs given a tile id and search results\n :param first_tile: first tile to appear in the search\n :param search_results: full results of the search\n :param toa: whether to attempt to retrieve toa/boa data\n :return: list of s3 URIs\n \"\"\"\n if toa:\n keys = [i['properties']['s3URI'] for i in search_results if\n first_tile in i['properties']['s3URI']]\n else:\n keys = [i['properties']['s3URI'].replace('l1c', 'l2a') + 'R10m/' for i in search_results if\n first_tile in i['properties']['s3URI']]\n return keys\n\n\ndef get_data_for_keys(bounds, keys, out_crs, vrt_params):\n \"\"\"\n Get RGB data from AWS given a list of keys\n :param bounds: bounding box of AOI\n :param keys: List of S3 URIS\n :param out_crs: output crs\n :param vrt_params: params for transformation\n :return: the data array\n \"\"\"\n with futures.ProcessPoolExecutor(max_workers=MAX_WORKERS) as executor:\n _worker = partial(rgb_for_key, bounds=bounds, vrt_params=vrt_params, out_crs=out_crs)\n data = list(executor.map(_worker, keys))\n gc.collect()\n return data\n","repo_name":"JamesOConnor/sat_giffer","sub_path":"src/giffer.py","file_name":"giffer.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
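# ---- editor's note (added; not part of the dataset record above) ----------
# Worked example of the UTM-zone formula in get_utm_srid(): for Calgary
# (lat 51.05, lon -114.07), which lies in UTM zone 11 north,
#   32700 - round((45 + 51.05) / 90) * 100 + round((183 - 114.07) / 6)
#   = 32700 - 1 * 100 + 11 = 32611  (EPSG:32611, WGS 84 / UTM zone 11N)
assert int(32700 - round((45 + 51.05) / 90, 0) * 100 + round((183 - 114.07) / 6, 0)) == 32611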
+{"seq_id":"36477757734","text":"from baconlang.baconlang_syntax_error import BACONLangSyntaxError\n\n\ndef parse(raw):\n symbols = [\n symbol.strip()\n for symbol in raw.replace(\"[\", \"[,\").replace(\"]\", \",]\").split(\",\")\n ]\n\n for idx, symbol in enumerate(symbols):\n if symbol is \"[\" or symbol is \"]\":\n continue\n\n if len(symbol) and symbol[0] is '\"' and symbol[-1] is '\"':\n symbols[idx] = symbol[1:-1]\n\n else:\n # Only valid strings are legal symbols\n raise BACONLangSyntaxError(symbol)\n\n return symbols\n","repo_name":"baconlang/python","sub_path":"baconlang/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"3945605319","text":"from django.conf.urls import url\nfrom . import views\n\napp_name=\"UniversityInfo\"\n\nurlpatterns = [\n\n url(r'^$', views.DepartmentList,name=\"dept_list\"),\n url(r'^about/$', views.About,name=\"about\"),\n url(r'^contact/$', views.Contact,name=\"contact\"),\n url(r'^department/(?P[\\w-]+)/$', views.DepartmentDetail,name=\"dept_detail\"),\n url(r'^department/(?P[\\w-]+)/(?P[\\w-]+)/$', views.StudentList,name=\"student_list\"),\n url(r'^department/(?P[\\w-]+)/(?P[\\w-]+)/(?P[\\w-]+)/$', views.StudentDetail,name=\"student_detail\"),\n]\n","repo_name":"raselcse07/University","sub_path":"src/UniversityInfo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"28129676287","text":"import numpy as np\n\n\ndef getTV_unit_test(shufIdx, X, y, Xp, yp) :\n\tidx = np.argwhere(shufIdx == 100)\n\tidx = np.asscalar(idx)\n\tassert(np.array_equal(X[idx], Xp[100]))\n\tassert(np.array_equal(y[idx], yp[100]))\n\ndef getTrainValidSet(X, y, r) :\n # X should have size [dataNumximgSizeximgSizeximgChan]\n # y should have size [dataNumxnb_class]\n assert(X.shape[0] == y.shape[0])\n # shufIdx = np.random.permutation(X.shape[0])\n shufIdx = range(X.shape[0])\n np.random.shuffle(shufIdx)\n Xp = X\n yp = y\n X = X[shufIdx]\n y = y[shufIdx]\n # getTV_unit_test(shufIdx, X, y, Xp, yp)\n\n mid= int(np.floor(X.shape[0]*(1.-r)))\n # Return values: X_trian, X_valid, y_train, y_valid\n return (X[0:mid], X[mid:], y[0:mid], y[mid:])\n\ndef parsePara(paraFile=\"./paras\") :\n\tpara = dict()\n\twith open(\"./paras\", \"r\") as pf :\n\t\tfor l in pf :\n\t\t\t# Remove white space\n\t\t\tl = l.replace(\" \", \"\")\n\t\t\t# Remove comments\n\t\t\tcont = l.split(\"#\")\n\n\t\t\tif cont[0] != \"\" :\n\t\t\t\tcontArr = cont[0].split(\"=\")\n\t\t\t\tnum = contArr[1]\n\t\t\t\tnum = float(num) if \".\" in num else int(num)\n\t\t\t\tpara[contArr[0]] = num\n\n\treturn para\n","repo_name":"andrewccchan/ML2016","sub_path":"hw3/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"44479302615","text":"import sqlite3\n\n\ndef cursor_and_conn_to_db():\n tournament_table = \"pokemon_tournament.db\"\n conn = sqlite3.connect(tournament_table)\n cursor = conn.cursor()\n return cursor, conn\n\n\ndef close_conn(conn):\n conn.commit()\n conn.close()\n\n\ndef create_tables():\n cursor, conn = cursor_and_conn_to_db()\n\n # Create the Players table if it doesn't exist\n cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS Players (\n id INTEGER PRIMARY KEY,\n name TEXT NOT NULL,\n created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\"\n )\n\n cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS Tournaments (\n id INTEGER PRIMARY KEY,\n created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n )\n \"\"\"\n )\n\n cursor.execute(\n \"\"\"\n CREATE TABLE IF NOT EXISTS Rounds (\n round_id INTEGER PRIMARY KEY,\n tournament_id INTEGER,\n player1_id INTEGER,\n player2_id INTEGER,\n winner_id INTEGER,\n loser_id INTEGER,\n round_number INTEGER,\n round_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n FOREIGN KEY (tournament_id) REFERENCES Tournaments (id),\n FOREIGN KEY (player1_id) REFERENCES Players (id),\n FOREIGN KEY (player2_id) REFERENCES Players (id),\n FOREIGN KEY (winner_id) REFERENCES Players (id),\n FOREIGN KEY (loser_id) REFERENCES Players (id)\n )\n \"\"\"\n )\n\n close_conn(conn)\n\n\ndef get_player_name(player_id):\n cursor, conn = cursor_and_conn_to_db()\n player = cursor.execute(\n \"SELECT name FROM Players WHERE id = ?\", (player_id,)\n ).fetchone()\n player_name = player[0] if player else None\n\n close_conn(conn)\n return player_name\n\n\ndef add_players(players: list):\n cursor, conn = cursor_and_conn_to_db()\n\n for name in players:\n # Check if the player already exists\n cursor.execute(\"SELECT id FROM Players WHERE name = ?\", (name,))\n existing_player = cursor.fetchone()\n\n if not existing_player:\n cursor.execute(\"INSERT INTO Players (name) VALUES (?)\", (name,))\n\n close_conn(conn)\n\n\ndef init_db():\n create_tables()\n\n players = [\"Nathan\", \"Angelina\", \"Gma V\", \"Gwen\", \"Toby\", \"Louis\"]\n add_players(players)\n\n\ndef get_player_stats(player_id):\n cursor, conn = cursor_and_conn_to_db()\n cursor.execute(\n \"\"\"\n SELECT\n wins,\n losses,\n CASE\n WHEN wins = 0 AND losses = 0 THEN 0\n WHEN losses = 0 THEN 100\n ELSE CAST(wins AS FLOAT) / (wins + losses) * 100\n END AS win_loss_ratio\n FROM (\n SELECT\n IFNULL(COUNT(CASE WHEN winner_id = ? THEN 1 END), 0) AS wins,\n IFNULL(COUNT(CASE WHEN loser_id = ? THEN 1 END), 0) AS losses\n FROM Rounds\n WHERE (player1_id = ? 
OR player2_id = ?)\n AND (winner_id IS NOT NULL OR loser_id IS NOT NULL)\n AND tournament_id IS NOT NULL\n )\n \"\"\",\n (player_id, player_id, player_id, player_id),\n )\n\n wins, losses, win_loss_ratio = cursor.fetchone()\n close_conn(conn)\n return wins, losses, win_loss_ratio\n\n\ndef get_players():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"SELECT id, name FROM Players\")\n players = cursor.fetchall()\n\n close_conn(conn)\n return players\n\n\ndef print_all_tournaments():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"SELECT * FROM Tournaments\")\n tournaments = cursor.fetchall()\n\n print(\"List of all tournaments:\")\n for tournament_info in tournaments:\n print(tournament_info)\n\n close_conn(conn)\n\n\ndef create_tournament_and_return_id():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"INSERT INTO Tournaments DEFAULT VALUES\")\n tournament_id = cursor.lastrowid\n\n close_conn(conn)\n\n return tournament_id\n\n\ndef get_tournament_rankings(tournament_id):\n cursor, conn = cursor_and_conn_to_db()\n cursor.execute(\n \"\"\"\n SELECT\n p.name,\n IFNULL(COUNT(CASE WHEN r.winner_id = p.id THEN 1 END), 0) AS wins,\n IFNULL(COUNT(CASE WHEN r.loser_id = p.id THEN 1 END), 0) AS losses,\n CASE\n WHEN IFNULL(COUNT(CASE WHEN r.loser_id = p.id THEN 1 END), 0) = 0 THEN 0\n ELSE IFNULL(COUNT(CASE WHEN r.winner_id = p.id THEN 1 END), 0) / IFNULL(COUNT(CASE WHEN r.loser_id = p.id THEN 1 END), 0)\n END AS win_loss_ratio\n FROM Players p\n LEFT JOIN Rounds r ON p.id = r.winner_id OR p.id = r.loser_id\n WHERE r.tournament_id = ?\n GROUP BY p.id, p.name\n ORDER BY wins DESC, losses ASC\n \"\"\",\n (tournament_id,),\n )\n\n rankings = []\n for row in cursor.fetchall():\n player_name, wins, losses, win_loss_ratio = row\n rankings.append((player_name, wins, losses, win_loss_ratio))\n\n close_conn(conn)\n return rankings\n\n\ndef get_rounds_without_winner():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\n \"\"\"\n SELECT round_id, round_number, player1_id, player2_id\n FROM Rounds\n WHERE winner_id IS NULL\n \"\"\"\n )\n\n rounds = cursor.fetchall()\n\n close_conn(conn)\n return rounds\n\n\ndef create_round(tournament_id, round_number, player1_id, player2_id):\n cursor, conn = cursor_and_conn_to_db()\n cursor.execute(\n \"\"\"\n INSERT INTO Rounds (tournament_id, round_number, player1_id, player2_id)\n VALUES (?, ?, ?, ?)\n \"\"\",\n (tournament_id, round_number, player1_id, player2_id),\n )\n\n close_conn(conn)\n\n\ndef update_winner(round_id, winner_id, loser_id):\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\n \"UPDATE Rounds SET winner_id = ?, loser_id = ? WHERE round_id = ?\",\n (winner_id, loser_id, round_id),\n )\n\n close_conn(conn)\n\n\ndef print_all_rounds():\n cursor, conn = cursor_and_conn_to_db()\n\n cursor.execute(\"SELECT * FROM Rounds\")\n rounds = cursor.fetchall()\n\n print(\"List of all rounds:\")\n for round_info in rounds:\n print(round_info)\n\n close_conn(conn)\n\n\ndef debug():\n print(\"Rounds\")\n print_all_rounds()\n print(\"Tournaments\")\n print_all_tournaments()\n","repo_name":"GearsandKeys/Tournament_CLI","sub_path":"data_access.py","file_name":"data_access.py","file_ext":"py","file_size_in_byte":6448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
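# ---- editor's note (added; not part of the dataset record above) ----------
# Hypothetical end-to-end flow for the data_access helpers above (the ids are
# illustrative only):
#   init_db()                                  # create tables, seed players
#   tid = create_tournament_and_return_id()
#   create_round(tid, 1, player1_id=1, player2_id=2)
#   round_id = get_rounds_without_winner()[0][0]
#   update_winner(round_id, winner_id=1, loser_id=2)
#   print(get_tournament_rankings(tid))        # [(name, wins, losses, ratio), ...]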
+{"seq_id":"44250012387","text":"import subprocess\nimport openai\nfrom dotenv import load_dotenv\nimport ast\nimport sys\nimport re\nimport os\n\n# Load the OpenAI API key from the .env file\nload_dotenv()\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n# Function to generate code using GPT-3.5-turbo\ndef generate_code(prompt):\n # Create a chat completion with the OpenAI API\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a professional full stack developer.\"},\n {\"role\": \"user\", \"content\": prompt},\n ]\n )\n # Return the content of the response\n return response['choices'][0]['message']['content']\n\n# Function to validate generated code\ndef validate_code(code):\n try:\n # Parse the code using ast.parse to validate its syntax\n ast.parse(code)\n return True\n except SyntaxError as e:\n print(f\"Syntax error in generated code: {e}\")\n if not fix_code(code, e):\n return False\n return validate_code(code)\n\n\"\"\"\nGiven a piece of code and an error message, this function extracts the line number from the error message,\nconstructs a prompt for ChatGPT with the code and the error message, sends the prompt to ChatGPT, and extracts\nthe fixed code from the response. It then checks if the fixed code is valid by compiling it, and returns the\nfixed code if it is valid, or False otherwise.\n:param code: A string representing the code to be fixed.\n:param error: An error message indicating the error in the code.\n:return: Either a string representing the fixed code, or False.\n\"\"\"\ndef fix_code(code, error):\n # Extract line number from error message\n line_num = int(re.search(r\"line (\\d+)\", str(error)).group(1))\n\n # Construct prompt for ChatGPT\n prompt = f\"Fix the error in line {line_num}: {error}\\nCode:\\n{code}\"\n\n # Send prompt to ChatGPT\n response = openai.Completion.create(\n engine=\"davinci-codex\",\n prompt=prompt,\n max_tokens=1024,\n n=1,\n stop=None,\n temperature=0.8,\n )\n\n # Extract fixed code from response\n fixed_code = response.choices[0].text.strip()\n\n # Check if the fixed code is valid\n try:\n compile(fixed_code, \"\", \"exec\")\n except SyntaxError:\n return False\n\n # Return fixed code\n return fixed_code\n\n\n\n# Function to execute the generated code\ndef execute_code(file_name):\n try:\n # Execute the code with subprocess.check_output\n output = subprocess.check_output([sys.executable, file_name], stderr=subprocess.STDOUT)\n return output\n except subprocess.CalledProcessError as e:\n error_message = e.output.decode()\n print(f\"An error occurred while executing the code: {error_message}\")\n sys.exit(1)\n\n# Function to create a Python file and write the generated code to it\ndef create_and_run_file(code):\n file_name = input(\"Enter the name of the Python file you'd like to create (e.g., script.py): \")\n with open(file_name, 'w') as f:\n f.write(code)\n print(f\"Created file '{file_name}'\")\n return file_name\n\n# Main function\ndef main():\n # Print the contents of the current directory\n print(\"Current directory contents:\")\n for file_name in os.listdir():\n print(f\" {file_name}\")\n \n # Ask the user for their initial prompt\n initial_prompt = input(\"Please enter your initial request for the script to create and run: \")\n \n # Generate the code\n code = generate_code(initial_prompt)\n\n # Print the generated code\n print(\"Generated Code:\")\n print(\"----------------\")\n print(code)\n print(\"----------------\")\n\n # Validate the generated 
code, keeping the repaired version if any\n code = validate_code(code)\n if code is None:\n print(\"Code validation failed.\")\n sys.exit(1)\n\n # Ask the user for approval to execute the code\n approval = input(\"Do you approve execution of this code? (yes/no): \")\n\n if approval.lower() == \"yes\":\n # Create a file with the generated code\n file_name = create_and_run_file(code)\n # Execute the generated code\n result = execute_code(file_name)\n # Print the output of the code execution\n print('Output:', result.decode())\n else:\n print(\"Execution cancelled.\")\n\n# Run the main function\nif __name__ == \"__main__\":\n main()\n","repo_name":"TheSnowGuru/InterpreterGPT","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"}
+{"seq_id":"37795083122","text":"#Exercício Python 082: Crie um programa que vai ler vários números e colocar em uma lista. Depois disso, crie duas listas extras que vão conter apenas os valores pares e os valores ímpares digitados, respectivamente. Ao final, mostre o conteúdo das três listas geradas.\n\n\noriginal_list = []\nwhile True:\n try:\n original_list.append(int(input('Enter a valid number: ')))\n except:\n print('Invalid character typed. Please, try again!')\n continue\n option = ' '\n while option not in 'YyNn':\n option = str(input('Would you like to continue? [Y/N]: '))\n if option in 'Nn':\n break\neven_values = []\nodd_values = []\nfor i, v in enumerate(original_list):\n if v % 2 == 0:\n even_values.append(v)\n elif v % 2 == 1:\n odd_values.append(v)\nprint(original_list)\nprint(even_values)\nprint(odd_values)","repo_name":"Matheusfarmaceutico/Exercicios-Python","sub_path":"Exercícios do Guanabara sendo refeitos em 2022/Revisaoguanabara/ex82.py","file_name":"ex82.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"42696432547","text":"#####\n# ogo_fe_pistoia_tabulate.py\n#\n# This script collects all the FE Pistoia results from the specified directory.\n#\n#####\n#\n# Andrew Michalski\n# University of Calgary\n# Biomedical Engineering Graduate Program\n# September 6, 2019\n# Modified to Py3: March 25, 2020\n#####\n\nscript_version = 1.0\n\n##\n# Import the required modules\nimport os\nimport sys\nimport glob\nimport argparse\nimport time\nimport pandas as pd\nfrom datetime import date\nfrom collections import OrderedDict\n\n\nfrom ogo.util.echo_arguments import echo_arguments\n\n##\ndef fePistoiaTab(args):\n\n parser = argparse.ArgumentParser(\n description=\"\"\"This script collects all the FE Pistoia results from the specified directory. INPUT: Data_Directory; OUTPUT: TXT file of results\"\"\")\n\n parser.add_argument(\"analysis_directory\",\n help = \"Path/To/Data/Directory\")\n\n parser.add_argument(\"--model\", type = int,\n default = 1,\n help = \"Set the bone value to analyze. 1 = RT_FEMUR_SF;\\n2 = RT_FEMUR_SLS;\\n3 = LT_FEMUR_SF;\\n4 = LT_FEMUR_SLS;\\n6 = L4_SPINE_VC.\\n(Default: %(default)s)\")\n\n ##\n # Collect the input arguments\n args = parser.parse_args()\n directory = args.analysis_directory\n model = args.model\n\n ##\n # Read the files in the Directory\n files = []\n\n if model == 1:\n model_filename ='*_RT_FEMUR_SF_PISTOIA.txt'\n model_filename2 ='_RT_FEMUR_SF_PISTOIA.txt'\n fileName = \"RT_FEMUR_SF_PISTOIA_Results.txt\"\n model_type = 'RT FEMUR SF Failure Load (N)'\n elif model == 2:\n model_filename = '*_RT_FEMUR_SLS_PISTOIA.txt'\n model_filename2 = '_RT_FEMUR_SLS_PISTOIA.txt'\n fileName = \"RT_FEMUR_SLS_PISTOIA_Results.txt\"\n model_type = 'RT FEMUR SLS Failure Load (N)'\n elif model == 3:\n model_filename ='*_LT_FEMUR_SF_PISTOIA.txt'\n model_filename2 ='_LT_FEMUR_SF_PISTOIA.txt'\n fileName = \"LT_FEMUR_SF_PISTOIA_Results.txt\"\n model_type = 'LT FEMUR SF Failure Load (N)'\n elif model == 4:\n model_filename = '*_LT_FEMUR_SLS_PISTOIA.txt'\n model_filename2 = '_LT_FEMUR_SLS_PISTOIA.txt'\n fileName = \"LT_FEMUR_SLS_PISTOIA_Results.txt\"\n model_type = 'LT FEMUR SLS Failure Load (N)'\n elif model == 6:\n model_filename = '*_L4_FE_PISTOIA.txt'\n model_filename2 = '_L4_FE_PISTOIA.txt'\n fileName = \"L4_SPINE_FE_PISTOIA_Results.txt\"\n model_type = 'L4 SPINE Failure Load (N)'\n else:\n print(\"Model value set is not defined. 
Ending script.\")\n sys.exit()\n\n ##\n # Create dataframe for output\n df = pd.DataFrame(columns = ['ID', model_type])\n\n ##\n # Read and extract the data\n os.chdir(directory)\n files = sorted(glob.glob(model_filename))\n k = 0\n for i in files:\n ID = i.replace(model_filename2, \"\")\n\n # Check to see if the file is empty and add to talbe if needed\n if os.stat(i).st_size == 0:\n x_fl = \"\"\n y_fl = \"\"\n z_fl = \"\"\n if model == 1:\n df.loc[k] = [ID, y_fl]\n if model == 2:\n df.loc[k] = [ID, z_fl]\n if model == 3:\n df.loc[k] = [ID, y_fl]\n if model == 4:\n df.loc[k] = [ID, z_fl]\n if model == 6:\n df.loc[k] = [ID, z_fl]\n k = k + 1\n continue\n\n # open each txt file and extract the failure loads\n lines = [line.rstrip('\\n') for line in open(i)]\n lines = lines[11].replace(\" \", \"\")\n lines = lines.replace(\"Failureload(RF*factor):\", \"\")\n index_1 = lines.find(\"E\")\n index_2 = lines.find(\"E\", index_1 + 1)\n index_3 = lines.find(\"E\", index_2 + 1)\n indices = [index_1, index_2, index_3]\n x_fl = lines[0:index_1+4]\n y_fl = lines[index_1+4:index_2+4]\n z_fl = lines[index_2+4:]\n\n\n if model == 1:\n df.loc[k] = [ID, y_fl]\n if model == 2:\n df.loc[k] = [ID, z_fl]\n if model == 3:\n df.loc[k] = [ID, y_fl]\n if model == 4:\n df.loc[k] = [ID, z_fl]\n if model == 6:\n df.loc[k] = [ID, z_fl]\n k = k + 1\n\n ##\n # Write Output TXT file of dataframe\n df.to_csv(fileName, sep = '\\t', index = False, header = True)\n\n print(\"Script Complete.\")\n\n\ndef main():\n description = '''\n This script collects all the FE Pistoia results from the specified directory. \n \n INPUT: Data_Directory; \n OUTPUT: TXT file of results\n \n '''\n\n\n # Setup argument parsing\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n prog=\"ogoFePistoiaTabulate\",\n description=description\n )\n\n parser.add_argument(\"analysis_directory\",\n help = \"Path/To/Data/Directory\")\n\n parser.add_argument(\"--model\", type = int,\n default = 1,\n help = \"Set the bone value to analyze. 1 = RT_FEMUR_SF;\\n2 = RT_FEMUR_SLS;\\n3 = LT_FEMUR_SF;\\n4 = LT_FEMUR_SLS;\\n6 = L4_SPINE_VC.\\n(Default: %(default)s)\")\n\n\n # Parse and display\n args = parser.parse_args()\n \n print(echo_arguments('fe_Pistoia_Tabulate', vars(args)))\n\n # Run program\n fePistoiaTab(args)\n\nif __name__ == '__main__':\n main()","repo_name":"Bonelab/Ogo","sub_path":"ogo/cli/ref/FePistoiaTabulate.py","file_name":"FePistoiaTabulate.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"36779879770","text":"import math\nfrom Algorithms.valManip import *\n\nclass Search(object):\n \"\"\"binary search of an array\"\"\"\n\n def bSearchAnimeList(list, animeName=None, anime_id=None):\n\n if(animeName is not None):\n \n target = valManip.makeCompareable(animeName)\n \n\n return Search.linearSearch(list, animeName=target)\n\n elif(anime_id is not None):\n\n start = 0\n middle = math.floor(len(list['entries'])/2)\n end = len(list['entries']) - 1\n counter = 0\n\n target = anime_id\n\n while(True):\n middle = (start + end) // 2\n midpoint = list['entries'][middle]['media']['id']\n\n if(midpoint < target):\n start = middle + 1\n elif(midpoint > target):\n end = middle - 1\n else:\n return middle\n counter += 1\n\n if(counter >= len(list['entries'])): #performs a linear search if binary search is not working\n return Search.linearSearch(list, anime_id=target)\n\n pass\n\n def linearSearch(list, animeName=None, anime_id=None):\n \n listLen = len(list['entries'])\n\n if(animeName is not None):\n\n for x in range(0, listLen):\n listVal = list['entries'][x]['media']['title']['userPreferred']\n\n if(valManip.makeCompareable(listVal) == valManip.makeCompareable(animeName)): #returns index if value is found\n #print(\"OMG THE BINARY SEARCH BROKE!!!\")\n return list['entries'][x]\n\n elif(anime_id is not None):\n \n for x in range(0, listLen):\n listVal = list['entries'][x]['media']['id']\n\n if(listVal == anime_id): #returns index if value is found\n #print(\"OMG THE BINARY SEARCH BROKE!!!\")\n return list['entries'][x]\n \n return None\n","repo_name":"backedman/animeScores","sub_path":"Algorithms/Search.py","file_name":"Search.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"22549548431","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/6/15 9:20\n# @Author : Saiterlz from lanzhou\n# @Email : kinekok@163.com\n# @File : create_nameTBC.py\n# @Software: PyCharm\ndef replaceFomat(text: str, word: str, n: int, reverse=False):\n \"\"\"\n 对文本中的指定单词进行格式化的替换/替回\n :param text: 要替换的文本\n :param word: 目标单词\n :param n: 目标单词的序号\n :param reverse: 是否进行替回\n :return: 替换后的文本\n \"\"\"\n # 构造【中间变量】\n new_text = text[:]\n fmt = \"<{}>\".format(n)\n # 替换\n if reverse is False:\n new_text = new_text.replace(word, fmt) # 格式化替换\n return new_text\n # 替回\n elif reverse is True:\n new_text = new_text.replace(fmt, word) # 去格式化替换\n return new_text\n # 要求非法,引发异常\n else:\n raise TypeError\n\n\ndef replaceMulti(text: str, olds: list, news: list):\n \"\"\"\n 一次替换多组字符串\n :param text: 要替换的文本\n :param olds: 旧字符串列表\n :param news: 新字符串列表\n :return: 替换后的文本\n \"\"\"\n if len(olds) != len(news):\n raise IndexError\n else:\n new_text = text[:]\n # 格式化替换\n i = 0 # 单词计数器\n for word in olds:\n i += 1\n new_text = replaceFomat(new_text, word, i)\n # 去格式化替回\n i = 0 # 归零\n for word in news:\n i += 1\n new_text = replaceFomat(new_text, word, i, True)\n # 返回替换好的文本\n return new_text\n\n\ndef test2(strtext):\n temp = strtext.strip()\n olds = ['\"', \",\", \"[\", \"]\"]\n news = [\"\", \"\", \"\", \"\"]\n result = replaceMulti(temp, olds, news)\n strfull = result.split('=')\n print(strfull)\n strFormat = strfull[1].strip() + ':' + strfull[0].strip() +'\\n'\n return strFormat\n\n\nadd_txt = []\nwith open('nameTBCold.txt', mode='r', encoding='utf-8') as f:\n for i in f.readlines():\n print(i)\n with open('nameTBC2.txt', mode='a+', encoding='utf-8') as h:\n h.write(test2(i))\n","repo_name":"saiterlz/TSM_Export_Excel-master","sub_path":"create_nameTBC.py","file_name":"create_nameTBC.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30999973543","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport cv2\nimport numpy as np\n\n\n# In[2]:\n\n\nimg1 = np.zeros((300, 300), dtype=\"uint8\")\ncv2.rectangle(img1, (100, 100), (250, 250), 255, -1)\ncv2.imshow(\"pic.jpg\", img1)\n\n\n# In[3]:\n\n\nimg2 = np.zeros((300, 300), dtype=\"uint8\")\ncv2.circle(img2, (150, 150), 90, 255, -1)\ncv2.imshow(\"car.webp\", img2)\n \n\n\n# In[4]:\n\n\nrect_and_circle = cv2.bitwise_and(img1,img2)\ncv2.imshow(\"AND operation\",rect_and_circle)\n \n\n\n# In[5]:\n\n\nrect_or_circle = cv2.bitwise_or(img1,img2)\ncv2.imshow(\"OR operation\",rect_or_circle)\n\n\n# In[6]:\n\n\nrect_xor_circle = cv2.bitwise_xor(img1,img2)\ncv2.imshow(\"XOR Operation\",rect_xor_circle)\n\n\n# In[7]:\n\n\nrect_xor_circle2 = cv2.bitwise_xor(img1,img2)\ncv2.imshow(\"XOR Operation\",rect_xor_circle2)\n\n\n# In[8]:\n\n\nrect_xor_circle2 = cv2.bitwise_xor(img1,img2)\ncv2.imshow(\"NOT Operation\",rect_xor_circle)\n\n\n# In[9]:\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","repo_name":"harishnk443/image-processing","sub_path":"BITWISE OPERATION.py","file_name":"BITWISE OPERATION.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"42546163700","text":"\n# imports Adventure() class from respective file\nfrom miniA5Adventurer import Adventurer\n\n# main execution outlines the program route by calling default values.\n# A name is given and health is increased and the new values are displayed.\n\n\ndef start():\n\n myAdventurer = Adventurer()\n\n print(\"The default values are:\", myAdventurer.name, myAdventurer.health)\n\n myAdventurer.name = \"Balin\"\n\n myAdventurer.gainLevel()\n\n print(\"New values are:\", myAdventurer.name, myAdventurer.health)\n\n\nstart()\n","repo_name":"ZilRahman/PythonCourse","sub_path":"A5/miniA5Contents/miniA5Start.py","file_name":"miniA5Start.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71836374649","text":"from matplotlib import transforms\nimport numpy as np\nimport torch \nfrom torchvision import transforms, datasets\nfrom torch.utils.data import random_split\nimport matplotlib.pyplot as plt\nfrom torchvision.datasets import ImageFolder\nimport torch.nn as nn\nimport torch.nn.functional as F \n\n\nclass Shift_Net(nn.Module):\n def __init__(self, pars):\n super(Shift_Net, self).__init__()\n ks=(5,5)\n ps=np.int32(5)\n self.mid_layer=256\n # Two successive convolutional layers.\n # Two pooling layers that come after convolutional layers.\n # Two dropout layers.\n self.conv1 = nn.Conv2d(3, 32, kernel_size=ks[1],padding='same')\n self.pool1= nn.MaxPool2d(kernel_size=10,stride=2)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=ks[1],padding='same')\n self.drop2 = nn.Dropout2d(p=0.2)\n self.pool2=nn.MaxPool2d(kernel_size=5,stride=2)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=ks[1],padding='same')\n self.pool3=nn.MaxPool2d(kernel_size=2,stride=3)\n self.drop_final=nn.Dropout(p=0.2)\n self.total_pars = 0\n\n self.first=True\n if self.first:\n self.forward(torch.zeros((1,)+pars.inp_dim))\n \n self.optimizer = torch.optim.SGD(self.parameters(), lr = 0.001)\n self.criterion=nn.CrossEntropyLoss()\n \n def forward(self, x):\n x = self.conv1(x)\n \n # Apply relu to a pooled conv1 layer.\n x = F.relu(self.pool1(x))\n # Apply relu to a pooled conv2 layer with a drop layer inbetween.\n x = self.drop2(F.relu(self.pool2(self.conv2(x))))\n x = F.relu(self.pool3(self.conv3(x)))\n if self.first:\n self.first=False\n self.inp=x.shape[1]*x.shape[2]*x.shape[3]\n # Compute dimension of output of x and setup a fully connected layer with that input dim \n # pars.mid_layer output dim. Then setup final 3 node output layer.\n print('input dimension to fc1',self.inp)\n if self.mid_layer is not None:\n self.fc1 = nn.Linear(self.inp, self.mid_layer)\n self.fc_final = nn.Linear(self.mid_layer, 3)\n else:\n self.fc1=nn.Identity()\n self.fc_final = nn.Linear(self.inp, 3)\n x = x.reshape(-1, self.inp)\n x = self.fc1(x)\n x = self.fc_final(x)\n return x\n \n # Run the network on the data, compute the loss, compute the predictions and compute classification rate/\n def get_acc_and_loss(self, data, targ):\n output = self.forward(data)\n loss = self.criterion(output, targ)\n pred = torch.max(output,1)[1]\n #print(f'Prediction is {pred}')\n correct = torch.eq(pred,targ).sum()\n \n return loss,correct\n \n # Compute classification and loss and then do a gradient step on the loss.\n def run_grad(self,data,targ):\n self.optimizer.zero_grad()\n loss, correct=self.get_acc_and_loss(data,targ)\n loss.backward()\n self.optimizer.step()\n \n return loss, correct\n\n\n# An object containing the relevant parameters for running the experiment.\nclass par(object):\n def __init__(self):\n self.batch_size=1000\n self.step_size=.001\n self.num_epochs=20\n self.numtrain=55000\n self.minimizer=\"Adam\"\n self.data_set=\"mnist\"\n self.model_name=\"model\"\n self.dropout=0.\n self.dim=32\n self.pool_size=2\n self.kernel_size=5\n self.mid_layer=256\n self.use_gpu=False\n ","repo_name":"jsjung00/rps","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"21297996340","text":"\"\"\"\nMR-FSK Modulator\n\"\"\"\n\nimport numpy as np\nfrom enum import Enum\nfrom ..tools.bits import from_bitstring, to_binary_array, check_binary_array\nfrom ..tools import operations\n\nfrom colorama import Fore\n\n\nclass Modulation(Enum):\n FSK2 = 1\n FSK4 = 2\n\n\n# SFD Values as a function of modulation, phyMRFSKSFD and coded/uncoded\n# Key is (modulation, phyMRFSKSFD, is_coded)\n# See tables 131 and 132 of 802.15.4g-2012\nSFD = {\n # Table 131\n (Modulation.FSK2, 0, True): from_bitstring('0110 1111 0100 1110'),\n (Modulation.FSK2, 0, False): from_bitstring('1001 0000 0100 1110'),\n (Modulation.FSK2, 1, True): from_bitstring('0110 0011 0010 1101'),\n (Modulation.FSK2, 1, False): from_bitstring('0111 1010 0000 1110'),\n # Table 132\n (Modulation.FSK4, 0, True): from_bitstring('0111 1101 1111 1111 0111 0101 1111 1101'),\n (Modulation.FSK4, 0, False): from_bitstring('1101 0111 0101 0101 0111 0101 1111 1101'),\n (Modulation.FSK4, 1, True): from_bitstring('0111 1101 0101 1111 0101 1101 1111 0111'),\n (Modulation.FSK4, 1, False): from_bitstring('0111 1111 1101 1101 0101 0101 1111 1101'),\n}\n\n# preamble field as a function of modulation\nPREAMBLE_SEQUENCE = {\n Modulation.FSK2: np.tile(np.array([0, 1]), 4),\n Modulation.FSK4: np.tile(np.array([0, 1, 1, 1]), 4)\n}\n\nPHR_LENGTH = 2\n\n# Tail bits based on the memory state of the RSC encoder\nRSC_TAIL_BITS = {\n 0b000: np.array([0, 0, 0]),\n 0b001: np.array([1, 0, 0]),\n 0b010: np.array([1, 1, 0]),\n 0b011: np.array([0, 1, 0]),\n 0b100: np.array([1, 1, 1]),\n 0b101: np.array([0, 1, 1]),\n 0b110: np.array([0, 0, 1]),\n 0b111: np.array([1, 0, 1]),\n}\n# Tail bits for the NRNSC encoder\nNRNSC_TAIL_BITS = np.array([0, 0, 0])\n\n\nclass Mr_fsk_modulator:\n def __init__(self,\n symbolRate : int,\n FSKModulationIndex : int,\n phyMRFSKSFD : int,\n modulation : str,\n phyFSKFECEnabled : bool,\n phyFSKFECScheme : int,\n macFCSType : int,\n phyFSKScramblePSDU : bool,\n phyFSKFECInterleavingRSC : bool,\n phyFSKPreambleLength : int = 4,\n verbose = False):\n \"\"\"\n Creates an instance of a MR-FSK modulator\n\n Parameters\n ----------\n symbolRate : int\n Number of symbols per second, if a float is supplied it will be converted to int\n FSKModulationIndex : float\n FSK Modulation index\n phyMRFSKSFD : int\n Selection of the SFD group (See table 131 of 802.15.4g)\n phyFSKPreambleLength : int\n Length of the preamble\n modulation : str\n Modulation type : \"2FSK\" or \"4FSK\"\n phyFSKFECEnabled : bool\n Enable FEC encoding (True) or not (False)\n phyFSKFECScheme : int\n Configures the FEC mode. 
0 for NRNSC and 1 for RSC\n macFCSType : int\n Lengths of the FCS 0 -> 4, 1 -> 2\n FCS Type describing the length of transmitted FCS.\n phyFSKScramblePSDU : bool\n Enable (True) or disable (False) the whitening of the PSDU\n phyFSKFECInterleavingRSC : bool\n Enable (True) interleaving for RSC or disable (False)\n\n \"\"\"\n # Checks\n if isinstance(symbolRate, float):\n symbolRate = int(symbolRate)\n elif not isinstance(symbolRate, int):\n raise TypeError(\"symbolRate must be an integer\")\n if symbolRate <= 0:\n raise ValueError(\"symbolRate must be a positive integer value\")\n\n if not (isinstance(FSKModulationIndex, float) or isinstance(FSKModulationIndex, int)):\n raise TypeError(\"FSKModulationIndex must be a number\")\n if not (0.25 <= FSKModulationIndex <= 2.5):\n raise ValueError(f\"FSKModulationIndex ({FSKModulationIndex}) must be between 0.25 and 2.5\")\n\n if isinstance(phyMRFSKSFD, int):\n if phyMRFSKSFD not in [0, 1]:\n raise ValueError(\"phyMRFSKSFD should be 0 or 1\")\n else:\n raise TypeError(\"Invalid phyMRFSKSFD type. It should be int\")\n\n if isinstance(phyFSKPreambleLength, int):\n if not (4 <= phyFSKPreambleLength <= 1000):\n raise ValueError(\n \"phyFSKPreambleLength value is invalid. The range is 4-1000 (See Table 71)\")\n else:\n raise TypeError(\"phyFSKPreambleLength must be an integer\")\n\n if isinstance(modulation, str):\n if modulation not in [\"2FSK\", \"4FSK\"]:\n raise ValueError(\n \"Invalid modulation type. It should be \\\"2FSK\\\" or \\\"4FSK\\\"\")\n else:\n raise TypeError(\"Invalid modulation type. It should be str\")\n\n if not isinstance(phyFSKFECEnabled, bool):\n raise TypeError(\"phyFSKFECEnabled should be of type bool\")\n\n if not isinstance(phyFSKFECScheme, int):\n raise TypeError(\"phyFSKFECScheme should be of type int\")\n elif phyFSKFECScheme not in [0, 1]:\n raise ValueError(\"phyFSKFECScheme should be 0 or 1\")\n\n if not isinstance(macFCSType, int):\n raise TypeError(\"FCS_length should be of type int\")\n elif macFCSType not in [0, 1]:\n raise ValueError(\"FCS_length should be 0 or 1\")\n\n if isinstance(phyFSKScramblePSDU, int):\n phyFSKScramblePSDU = bool(phyFSKScramblePSDU)\n elif not isinstance(phyFSKScramblePSDU, bool):\n raise TypeError(\"phyFSKScramblePSDU should be of type bool\")\n\n if not isinstance(phyFSKFECInterleavingRSC, bool):\n raise TypeError(\"phyFSKFECInterleavingRSC should be of type bool\")\n\n self._symbol_rate = symbolRate\n self._FSKModulationIndex = FSKModulationIndex\n self._macFCSType = macFCSType\n self._phyFSKFECEnabled = phyFSKFECEnabled\n self._phyMRFSKSFD = phyMRFSKSFD\n self._phyFSKPreambleLength = phyFSKPreambleLength\n self._modulation = Modulation.FSK2 if modulation == \"2FSK\" else Modulation.FSK4\n self._phyFSKScramblePSDU = phyFSKScramblePSDU\n self._phyFSKFECInterleavingRSC = phyFSKFECInterleavingRSC\n self._phyFSKFECScheme = phyFSKFECScheme\n self._verbose = verbose\n\n def _bin(self, number, width=8, MSB_first=True):\n \"\"\"\n Converts a number to binary representation with LSB first\n\n Parameters\n ----------\n number : int\n\n Returns\n -------\n output : ndarray\n Array of bits\n \"\"\"\n\n return np.array([int(x) for x in np.binary_repr(number, width)[::(1 if MSB_first else -1)]])\n\n def _SHR(self):\n \"\"\"\n Returns SHR bitstream\n\n Returns\n -------\n signal : ndarray\n SHR bitstream\n \"\"\"\n\n signal = np.concatenate([\n np.tile(PREAMBLE_SEQUENCE[self._modulation],\n self._phyFSKPreambleLength),\n SFD[(self._modulation, self._phyMRFSKSFD, self._phyFSKFECEnabled)]\n 
]).astype(int)\n return signal\n\n def _PHR_mode_switch(self, modeSwitchParameterEntry, new_mode_fec, PAGE, MOD, MD):\n \"\"\"\n Returns a PHR bitstream for mode_switch\n\n modeSwitchParameterEntry : int\n Mode switch operation (0-3)\n new_mode_fec : bool\n Signal that the packet following is encoded using FEC\n\n \"\"\"\n\n # The mode-switch PHR is 2 octets (16 bits); allocating only PHR_LENGTH\n # slots, as the original code did, made the assignments below overflow.\n signal = np.zeros([PHR_LENGTH * 8], dtype=int)\n # See figure 115\n signal[0] = 1 # MS\n signal[1:2+1] = self._bin(modeSwitchParameterEntry, 2)\n signal[3] = new_mode_fec # FEC\n signal[4] = PAGE # New Mode\n signal[5:6+1] = self._bin(MOD, 2) # New Mode (2 bits)\n signal[7:10+1] = self._bin(MD, 4) # New Mode\n PC = np.logical_xor.reduce(signal[:10+1])\n # BCH(15,11) code\n g = np.poly1d([1, 0, 0, 1, 1])\n\n print(Fore.RED + f\"Warning : BCH checksum isn't implemented (replaced with zeros)\" + Fore.RESET)\n B = np.array([0, 0, 0, 0])\n signal[11:14+1] = B # B is already a bit array; _bin expects an integer\n signal[15] = PC\n\n return signal\n\n def _PHR(self, message_length):\n \"\"\"\n Returns PHR bitstream\n\n Parameters\n ----------\n message_length : int\n Length of the PSDU (prior to FEC encoding) in octets\n\n\n Returns\n -------\n signal : ndarray\n PHR bitstream\n \"\"\"\n\n signal = np.zeros([PHR_LENGTH * 8], dtype=int)\n # See Figure 114\n signal[0] = 0 # MS\n signal[1:2+1] = 0 # Reserved\n signal[3] = self._macFCSType # FCS type\n signal[4] = 1 if self._phyFSKScramblePSDU else 0 # DW\n signal[5:] = self._bin(message_length, 11) # L\n\n return signal\n\n def _FEC(self, data, tail = True, pad = True):\n \"\"\"\n Apply FEC encoding to the data (PHR + PSDU) and appends tail and pad bits\n\n Parameters\n ----------\n data : ndarray\n Bitstream of the message to encode\n tail : bool\n Enable tail (True by default)\n pad : bool\n Enable pad (True by default)\n \"\"\"\n\n def M_iter_RSC(M, bi):\n # Extract bits\n M0, M1, M2 = (M >> 2) & 0b001, (M >> 1) & 0b001, M & 0b001\n # output values\n bi = int(bi)\n ui0 = bi\n ui1 = (bi ^ M0 ^ M1 ^ M2) ^ M1 ^ M2\n # Update M\n M0, M1, M2 = bi ^ M0 ^ M1 ^ M2, M0, M1\n\n return (M0 << 2) | (M1 << 1) | M2, ui0, ui1\n\n def M_iter_NRNSC(M, bi):\n # Extract bits\n M0, M1, M2 = (M >> 2) & 0b001, (M >> 1) & 0b001, M & 0b001\n bi = int(bi)\n ui0 = not (bi ^ M0 ^ M1 ^ M2)\n ui1 = not (bi ^ M1 ^ M2)\n\n M0, M1, M2 = bi, M0, M1\n\n return (M0 << 2) | (M1 << 1) | M2, ui0, ui1\n\n\n # PAD_BITS are derived from figures 121 and 122. 
It looks like they could be set arbitrarily\n if (data.size//8) % 2 == 0:\n # is even\n # L_PAD = 13\n PAD_BITS = np.array([0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1])\n else:\n # L_PAD = 5\n PAD_BITS = np.array([0, 1, 0, 1, 1])\n\n M = 0b000\n\n encoded_PHR_PSDU = []\n for bi in data:\n # Split M into its bit values\n if self._phyFSKFECScheme:\n M, ui0, ui1 = M_iter_RSC(M, bi)\n else:\n M, ui0, ui1 = M_iter_NRNSC(M, bi)\n\n\n encoded_PHR_PSDU.append(ui1)\n encoded_PHR_PSDU.append(ui0)\n\n # Add tails bits and pad bits\n TAIL_BITS = RSC_TAIL_BITS[M] if self._phyFSKFECScheme else NRNSC_TAIL_BITS\n if tail:\n for bi in TAIL_BITS:\n if self._phyFSKFECScheme:\n M, ui0, ui1 = M_iter_RSC(M, bi)\n else:\n M, ui0, ui1 = M_iter_NRNSC(M, bi)\n\n encoded_PHR_PSDU.append(ui1)\n encoded_PHR_PSDU.append(ui0)\n if pad:\n for bi in PAD_BITS:\n if self._phyFSKFECScheme:\n M, ui0, ui1 = M_iter_RSC(M, bi)\n else:\n M, ui0, ui1 = M_iter_NRNSC(M, bi)\n\n encoded_PHR_PSDU.append(ui1)\n encoded_PHR_PSDU.append(ui0)\n\n return np.array(encoded_PHR_PSDU).astype(np.uint8)\n\n def _interleaver(self, data):\n \"\"\"\n Applies interleaver to the data\n\n Parameters\n ----------\n data : ndarray\n Input message (bitstream)\n\n Returns\n -------\n output : ndarray\n \"\"\"\n output = np.zeros_like(data)\n\n # NOTE : Each permutation is applied on a pair of bits\n\n BLOCK_SIZE = 16\n\n k = np.arange(BLOCK_SIZE, dtype=int)\n t = (15 - 4 * np.mod(k, 4) - np.floor(k / 4)).astype(int)\n\n for i, block in enumerate(data.reshape(-1, BLOCK_SIZE * 2)):\n new_data = np.zeros_like(block)\n for ki, ti in zip(k, t):\n new_data[ti*2:ti*2 + 2] = block[ki*2:ki*2 + 2]\n output[i*BLOCK_SIZE*2:(i+1)*BLOCK_SIZE*2] = new_data\n\n return output\n\n def _FSKModulator(self, message : np.ndarray, samplesPerSymbol : int):\n \"\"\"\n FSK modulation of the given message.\n\n # 2FSK modulation : \n the symbols are placed at +- df.\n 0 -> -fdev\n 1 -> +fdev\n \n # 4FSK modulation :\n the symbols are placed at +- df and +- df/3\n\n 01 -> -fdev\n 00 -> -fdev/3\n 10 -> +fdev/3\n 11 -> +fdev\n\n Parameters\n ----------\n message : np.ndarray\n Message bitstream\n samplesPerSymbol : int\n Number of IQ samples per symbol\n\n Returns\n -------\n output : ndarray\n Complex output signal\n f : float\n Sampling frequency\n \"\"\"\n # Frequency deviation (from the center)\n deltaF = self._symbol_rate * self._FSKModulationIndex / 2\n if self._modulation == Modulation.FSK2:\n fdev = deltaF\n else:\n fdev = 3 * deltaF\n\n mod = {\n # 2FSK\n '0' : -fdev,\n '1' : +fdev,\n # 4FSK\n '01' : -fdev,\n '00' : -fdev/3,\n '10' : +fdev/3,\n '11' : +fdev\n }\n\n step = 1 if self._modulation == Modulation.FSK2 else 2\n # Create a frequency deviation signal\n freqs = []\n for val in message.reshape(-1,step):\n key = ''.join([str(x) for x in val])\n freqs.append(mod[key])\n\n # Generate I and Q from the frequency deviation\n \n # Symbol period\n Ts = 1/self._symbol_rate\n\n f = np.repeat(freqs, samplesPerSymbol)\n dt = (Ts / samplesPerSymbol)\n t = np.arange(0, dt * f.size, dt)\n\n IQ = np.exp(2*np.pi*f*1j*t)\n\n return IQ, 1/dt\n\n\n\n def bitsToIQ(self, bits):\n \"\"\"\n Encodes the given binary message with MR-FSK modulator\n\n Parameters\n ----------\n bits : ndarray or list\n Message to encode (PSDU)\n\n Returns\n -------\n signal : ndarray\n output bitstream\n f : float\n signal frequency\n \"\"\"\n bits = check_binary_array(bits)\n\n self._PHR_PSDU = np.concatenate(\n [self._PHR(bits.size // 8), bits])\n\n # Symbol_length is the number of bits coded for a single symbol. 
If FEC is disabled, there is one bit per symbol. If FEC is enabled, there are two bits per symbol\n\n if self._phyFSKFECEnabled:\n symbol_length = 2\n self._PHR_PSDU_encoded = self._FEC(self._PHR_PSDU)\n\n if self._phyFSKFECInterleavingRSC:\n self._PHR_PSDU_interleaved = self._interleaver(\n self._PHR_PSDU_encoded)\n self._PHR_PSDU = self._PHR_PSDU_interleaved\n else:\n # Data is unchanged (encoding only)\n self._PHR_PSDU = self._PHR_PSDU_encoded\n else:\n # Do not change anything\n symbol_length = 1\n\n # Apply data whitening (or not)\n if self._phyFSKScramblePSDU:\n PSDU_start = PHR_LENGTH * symbol_length * 8\n\n self._PHR_PSDU_scrambled = self._PHR_PSDU.copy()\n\n self._PHR_PSDU_scrambled[PSDU_start:] = operations.scrambler(\n self._PHR_PSDU_scrambled[PSDU_start:])\n self._PHR_PSDU = self._PHR_PSDU_scrambled\n # Generate output signal\n\n self._binarySignal = np.concatenate([\n self._SHR(),\n self._PHR_PSDU\n ])\n\n # TODO : change this\n samplesPerSymbol = 20\n\n return self._FSKModulator(self._binarySignal, samplesPerSymbol)\n\n def bytesToIQ(self, bytes):\n \"\"\"\n Encodes the given message (list of bytes) with the MR-FSK modulator\n\n Parameters\n ----------\n bytes : ndarray or list or bytes\n Message to encode (PSDU) as a list of bytes\n\n Returns\n -------\n signal : ndarray\n Complex IQ output signal\n f : float\n Sampling frequency\n \"\"\"\n message_bin = to_binary_array(bytes)\n return self.bitsToIQ(message_bin)\n\n def modeSwitchToIQ(self, modeSwitchParameterEntry, new_mode_fec):\n \"\"\"\n Validates the mode switch parameters\n\n modeSwitchParameterEntry : int\n Mode switch operation (0-3)\n new_mode_fec : bool\n Signals that the following packet is encoded using FEC\n \"\"\"\n\n if isinstance(modeSwitchParameterEntry, int):\n if not (0 <= modeSwitchParameterEntry <= 3):\n raise ValueError(\n \"Invalid modeSwitchParameterEntry value. It should be between 0 and 3\")\n else:\n raise TypeError(\n \"Invalid modeSwitchParameterEntry type. It should be int\")\n\n def _print_verbose(self, message: str):\n \"\"\"\n Prints additional information if the verbose flag is True\n \"\"\"\n if self._verbose:\n print(message)\n","repo_name":"SebastienDeriaz/sun_phy","sub_path":"sun_phy/mr_fsk/mr_fsk_modulator.py","file_name":"mr_fsk_modulator.py","file_ext":"py","file_size_in_byte":17002,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
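The _FSKModulator method in the record above maps bit groups to frequency offsets and synthesizes complex IQ samples from them. The following is a minimal standalone sketch of the 2FSK case only; the symbol rate, modulation index and samples-per-symbol values are illustrative defaults, not values taken from the record or from the standard.

import numpy as np

def fsk2_iq(bits, symbol_rate=50e3, mod_index=1.0, samples_per_symbol=20):
    # 2FSK: bit 1 -> +fdev, bit 0 -> -fdev, with fdev = R * h / 2 as in the record
    fdev = symbol_rate * mod_index / 2
    freqs = np.repeat(np.where(np.asarray(bits) == 1, fdev, -fdev), samples_per_symbol)
    dt = 1 / (symbol_rate * samples_per_symbol)   # IQ sample period
    t = np.arange(freqs.size) * dt
    return np.exp(2j * np.pi * freqs * t), 1 / dt  # complex baseband, sampling frequency

iq, fs = fsk2_iq([1, 0, 1, 1])
print(iq.shape, fs)  # (80,) 1000000.0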
+{"seq_id":"1718887827","text":"import random\nimport pennylane as qml\nimport json\nimport sys\nfrom pennylane import numpy as np\nfrom time import localtime, strftime\nfrom plotting import plot_curves\nfrom data import load_mnist, mnist_apn_generator\nfrom data import load_mnist_ae\nfrom data import load_breast_cancer_lju, bc_apn_generator\nfrom data import load_moons_dataset, moons_apn_generator\nfrom evaluation import evaluate\n\n\nwith open('hyperparameters.json') as json_file:\n hp = json.load(json_file)\nprint(hp)\n\nif len(sys.argv) == 2:\n hp[\"output_qubits\"] = int(sys.argv[1])\n\nstarting_time = strftime(\"%Y-%m-%d_%H-%M-%S\", localtime())\n\nnp.random.seed(hp[\"seed\"])\nrandom.seed(hp[\"seed\"])\n\n\ndef circuit(params, data):\n qml.templates.embeddings.AngleEmbedding(\n features=data, wires=range(hp[\"data_qubits\"]), rotation=\"X\"\n )\n\n for layer in range(hp[\"layers\"]):\n for wire in range(hp[\"qubits\"]):\n qml.RX(params[layer][wire][0], wires=wire)\n qml.RY(params[layer][wire][1], wires=wire)\n for wire in range(0, hp[\"qubits\"] - 1, 2):\n qml.CZ(wires=[wire, wire + 1])\n for wire in range(1, hp[\"qubits\"] - 1, 2):\n qml.CZ(wires=[wire, wire + 1])\n return [qml.expval(qml.PauliZ(i)) for i in range(hp[\"output_qubits\"])]\n # return [qml.expval(qml.PauliZ(2*x) @ qml.PauliZ((2*x)+1))\n # for x in range(hp[\"output_qubits\"])]\n\n\ndef triplet_loss(params, qNode, anchor, positive, negative, alpha):\n a_value = qNode(params, anchor)\n p_value = qNode(params, positive)\n n_value = qNode(params, negative)\n\n dist_a_p = np.linalg.norm(a_value - p_value)**2\n dist_a_n = np.linalg.norm(a_value - n_value)**2\n\n return max(dist_a_p - dist_a_n + alpha, 0.0)\n\n\ndef train():\n assert(hp[\"dataset\"] in [\"mnist\", \"mnist_ae\", \"bc\", \"moons\"])\n dev = qml.device('default.qubit', wires=hp[\"qubits\"], shots=hp[\"shots\"])\n \n qNode = qml.QNode(func=circuit, device=dev)\n\n stepsize = hp[\"start_stepsize\"]\n optimizer = qml.AdamOptimizer(stepsize)\n\n def cost_fn(params):\n return triplet_loss(params, qNode, anchor,\n positive, negative, hp[\"alpha\"])\n\n params = np.random.uniform(low=-np.pi, high=np.pi,\n size=(hp[\"layers\"], hp[\"qubits\"], 2)\n )\n\n if hp[\"dataset\"] == \"mnist\":\n train_x, train_y, test_x, test_y = load_mnist(seed=hp[\"seed\"],\n train_size=hp[\"train_size\"],\n test_size=hp[\"test_size\"],\n classes=hp[\"classes\"],\n wires=hp[\"data_qubits\"]\n )\n\n apn_generator = mnist_apn_generator(train_x,\n train_y,\n n_cls=len(hp[\"classes\"])\n )\n elif hp[\"dataset\"] == \"mnist_ae\":\n train_x, train_y, test_x, test_y = load_mnist_ae(train_size=hp[\"train_size\"],\n test_size=hp[\"test_size\"],\n classes=hp[\"classes\"],\n wires=hp[\"data_qubits\"]\n )\n\n apn_generator = mnist_apn_generator(train_x,\n train_y,\n n_cls=len(hp[\"classes\"])\n )\n elif hp[\"dataset\"] == \"bc\":\n train_x, train_y, test_x, test_y = load_breast_cancer_lju(hp[\"train_size\"],\n hp[\"test_size\"]\n )\n apn_generator = bc_apn_generator(train_x,\n train_y\n )\n elif hp[\"dataset\"] == \"moons\":\n train_x, train_y, test_x, test_y = load_moons_dataset(hp[\"train_size\"],\n hp[\"test_size\"]\n )\n apn_generator = moons_apn_generator(train_x,\n train_y\n )\n hp[\"classes\"] = [\"Moon 1\", \"Moon 2\"]\n\n accuracys = []\n dbis = []\n losses = []\n gradients = []\n\n for step in range(hp[\"steps\"] + 1):\n anchor, positive, negative = next(apn_generator)\n\n params, c = optimizer.step_and_cost(cost_fn, params)\n\n print(f\"step {step:{len(str(hp['steps']))}}| cost 
{c:8.5f}\")\n\n losses.append(c)\n\n if step % hp[\"grads_every\"] == 0:\n g, _ = optimizer.compute_grad(cost_fn, (params,), {}, None)\n gradients.append(np.average(np.abs(g)))\n # print(\"Gradients\", g[0][0, 0, 0])\n # return g[0][0, 0, 0]\n\n if step % hp[\"test_every\"] == 0:\n accuracy, dbi = evaluate(hp[\"dataset\"], train_x, train_y,\n test_x, test_y,\n qNode, params, step,\n hp[\"classes\"], hp[\"output_qubits\"]\n )\n accuracys.append((step, accuracy))\n dbis.append((step, dbi))\n print(\"Accuracys:\\n\", accuracys)\n\n # if (step+1) % hp[\"update_sz_every\"] == 0:\n # stepsize *= hp[\"sz_factor\"]\n # optimizer.stepsize = stepsize\n # print(\"Updated stepsize to\", stepsize)\n\n if accuracys:\n print(\"Accuracys:\\n\", accuracys)\n top_acc = max(np.array(accuracys)[:, 1])\n print(\"Maximum: \", top_acc)\n hp[\"top_acc\"] = float(top_acc)\n\n if dbis:\n print(\"DBIs:\\n\", dbis)\n top_dbi = min(np.array(dbis)[:, 1])\n print(\"Minimum:\", top_dbi)\n hp[\"top_dbi\"] = float(top_dbi)\n\n if gradients:\n print(\"Gradients Avg: \", np.average(gradients))\n\n name = f\"{starting_time}_{hp['output_qubits']}\"\n with open(f\"./trainings/{name}.json\", \"w\") as json_file:\n json.dump(hp, json_file)\n np.savez(f\"./trainings/{name}.npz\",\n accuracys=accuracys,\n dbis=dbis,\n losses=losses,\n gradients=gradients,\n params=params\n )\n\n # plot_curves(np.array(accuracys),\n # np.array(dbis),\n # np.array(losses),\n # f\"Qubits: {hp['qubits']}, \" +\n # f\"Layers: {hp['layers']}, \" +\n # f\"Classes: {hp['classes']}, \" +\n # f\"Output_dim: {hp['output_qubits']}\"\n # )\n \n\nif __name__ == \"__main__\":\n # n_layers = [5, 25, 50, 75, 100]\n # n_qubits = [4, 6, 8, 10, 12]\n # seeds = list(range(100))\n\n # gradients = np.zeros((len(n_layers), len(n_qubits), len(seeds)))\n\n # for il, l in enumerate(n_layers):\n # for iq, q in enumerate(n_qubits):\n # for js, s in enumerate(seeds):\n # hp[\"layers\"] = l\n # hp[\"qubits\"] = q\n # hp[\"seeds\"] = s\n # print(l, q, s)\n # grad = train()\n # gradients[il, iq, js] = grad\n\n # np.savez(f\"./trainings/{starting_time}_grads.npz\", grads=gradients,\n # n_layers=np.array(n_layers),\n # n_qubits=np.array(n_qubits),\n # seeds=np.array(seeds))\n # print(gradients)\n\n train()\n","repo_name":"cirKITers/quantum-triplet-loss","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
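The triplet objective driving the training loop above reduces to a few lines of plain NumPy once the quantum circuit is stripped away; the embeddings and margin below are made up to show the hinge behaviour.

import numpy as np

def triplet_loss(a, p, n, alpha=1.0):
    # squared-distance margin loss: hinge on d(a,p) - d(a,n) + alpha
    d_ap = np.linalg.norm(a - p) ** 2
    d_an = np.linalg.norm(a - n) ** 2
    return max(d_ap - d_an + alpha, 0.0)

a = np.array([0.0, 1.0])   # anchor embedding (made up)
p = np.array([0.1, 0.9])   # positive: same class, close to the anchor
n = np.array([1.0, 0.0])   # negative: other class, far away
print(triplet_loss(a, p, n))  # 0.0: the negative is already farther by more than the margin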
+{"seq_id":"16653818936","text":"#vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom matplotlib import rc\nfrom os.path import expanduser\nfrom pandas import DataFrame\nfrom collections import defaultdict\n\nBASE_PATH = expanduser('./fig')\n\ndef plot_detection_timeliness_hist_1hr(X):\n plt.clf()\n t = np.arange(min(X), max(X)+1, 60) # 6 hours period\n plt.hist(X)\n plt.ylabel(r'frequency')\n plt.xlabel(r'time (minute)')\n plt.xticks(t)\n # plt.title(r\"\\TeX\\ is Number \"\n # r\"$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n # fontsize=16, color='gray')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_timeliness_hist_1hr.pdf'))\n\ndef plot_detection_timeliness_hist(X):\n X /= 3600\n plt.clf()\n t = np.arange(min(X), max(X)+1, 12) # 6 hours period\n plt.hist(X)\n plt.ylabel(r'frequency')\n plt.xlabel(r'time (hr)')\n plt.xticks(t)\n # plt.title(r\"\\TeX\\ is Number \"\n # r\"$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n # fontsize=16, color='gray')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_timeliness_hist.pdf'))\n\ndef plot_detection_timeliness(X):\n X /= 3600\n plt.clf()\n t = np.arange(min(X), max(X)+1, 12) # 6 hours period\n X_ = np.sort(X)\n X_ /= X_.sum()\n CY = np.cumsum(X_)\n plt.plot(np.sort(X),CY,'r')\n plt.ylabel(r'empirical cdf')\n plt.xlabel(r'time (hr)')\n plt.xticks(t)\n # plt.title(r\"\\TeX\\ is Number \"\n # r\"$\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\\pi}}{2^n}$!\",\n # fontsize=16, color='gray')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_timeliness.pdf'))\n\ndef plot_detection_accuracy(X):\n plt.clf()\n df2 = DataFrame(X, columns=['detected', 'not detected'])\n ax = df2.plot(kind='bar', stacked=True, color='k');\n ax.set_ylabel('# compromised user')\n ax.set_xlabel('incident id')\n plt.tight_layout()\n plt.yticks([1,2,3,4])\n for container in ax.containers:\n if container.get_label() == 'detected':\n plt.setp(container, color='0.75')\n else:\n plt.setp(container, color='0')\n # ax.legend()\n plt.legend(loc='upper left')\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_accuracy.pdf'))\n \n\n# def plot_sequencing_accuracy(X, idx, name):\n# \"\"\"\n# length of the array must equal length of the idx\n# \"\"\"\n# df2 = DataFrame(X, index=idx,columns=['correct', 'incorrect'])\n# df2.plot(kind='bar', stacked=True)\n\n# fig = plt.gcf()\n# fig.savefig('{}/{}'.format(BASE_PATH,\n# 'graph_sequencing_accuracy_%s.pdf' % name))\n\n \ndef plot_sequencing_accuracy(conf_arr, idx, name, fontsize=12):\n norm_conf = []\n for i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i,0)\n for j in i:\n tmp_arr.append(float(j)/float(a))\n norm_conf.append(tmp_arr)\n plt.clf()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.xaxis.tick_top()\n [ax.grid(False) for ax in fig.axes]\n res = ax.imshow(np.array(norm_conf), origin='upper', cmap=plt.cm.jet_r, interpolation='nearest')\n for i, cas in enumerate(conf_arr):\n for j, c in enumerate(cas):\n plt.text(i, j, c, horizontalalignment='center', verticalalignment='center',fontsize=fontsize, color='white')\n cb = fig.colorbar(res)\n width = len(conf_arr)\n height = len(conf_arr[0])\n plt.xticks(range(width), idx[:width])\n plt.yticks(range(height), idx[:height])\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_sequencing_accuracy_%s.pdf' % name))\n\ndef 
plot_detection_accuracy_helper(result):\n # detection accuracy\n d = defaultdict()\n for row in result.iterrows():\n r = row[1]\n if (d.has_key(r.incident) == False):\n d[r.incident] = defaultdict()\n \n if (d[r.incident].has_key('hit') == False):\n d[r.incident]['hit'] = 0\n \n if (d[r.incident].has_key('miss') == False):\n d[r.incident]['miss'] = 0\n\n if r.accuracy == 1:\n d[r.incident]['hit'] += 1\n else:\n d[r.incident]['miss'] += 1\n \n detection_accuracy = []\n for incident in d:\n detection_accuracy.append([d[incident]['hit'], d[incident]['miss']])\n \n plot_detection_accuracy(np.array(detection_accuracy, dtype=int))\n \ndef plot_detection_timeliness_helper(result):\n # detection accuracy\n plot_detection_timeliness(np.array(result.detection_timeliness, dtype=float))\n # print zip(result.num_event, result.labeling_time)\n # print result.detection_timeliness, result.preemption_timeliness, result.attack_duration\n \ndef plot_preemption_cdf(df):\n df_filtered = df[df['accuracy'] == 1]\n df = df_filtered\n # raise SystemExit\n \n plt.clf()\n \n X = np.array([x/3600 if x >=0 and x/3600 < 48 else 48 for x in df.preemption_timeliness], dtype=float)\n try:\n plt.hist(X, cumulative=True, label='preemption', color='0')\n except ValueError:\n pass\n \n plt.ylim(14,24)\n plt.xlabel('preemption timeliness (hr)')\n plt.ylabel('cumulative count')\n plt.legend(loc='upper left')\n fig = plt.gcf()\n [ax.grid(False) for ax in fig.axes]\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_preemption_cdf.pdf'))\n \ndef plot_detection_cdf(df):\n df_filtered = df[df['accuracy'] == 1]\n df = df_filtered\n # raise SystemExit\n \n plt.clf()\n \n X = np.array([x/3600 if x >=0 and x/3600 < 48 else 48 for x in df.detection_timeliness], dtype=float)\n try:\n ax = plt.hist(X, cumulative=True, label='detection', color='0')\n except ValueError:\n pass\n \n plt.ylim(14,24)\n plt.xlabel('detection timeliness (hr)')\n plt.ylabel('cumulative count')\n plt.legend(loc='upper left')\n fig = plt.gcf()\n [ax.grid(False) for ax in fig.axes]\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_cdf.pdf'))\n \ndef plot_detection_and_preemption_timeline(df):\n plt.clf()\n timeline = []\n for row in df.iterrows():\n r = row[1]\n if r.detection_timeliness!= -1:\n sum = r.detection_timeliness + r.preemption_timeliness\n if (sum != 0):\n ratio = r.detection_timeliness/sum\n timeline.append(ratio)\n else:\n timeline.append(1)\n\n fig, ax = plt.subplots()\n\n count = 0\n for user in timeline:\n if count == 0:\n plt.bar(left=0, width=user, height=0.8, bottom=count, color='0.75', label='not_malicious')\n plt.bar(left=user, width=1.02-user, height=0.8, bottom=count, color='0', label='malicious')\n else:\n plt.bar(left=0, width=user, height=0.8, bottom=count, color='0.75')\n plt.bar(left=user, width=1.02-user, height=0.8, bottom=count, color='0')\n count = count + 1\n ax.set_yticks(np.arange(0, len(timeline), 5), minor=False)\n ax.set_xticks(np.arange(0, 2), minor=False)\n ax.grid(False)\n \n # for container in ax.containers:\n # if container.get_label() == 'malicious':\n # plt.setp(container, hatch='xxx')\n # else:\n # plt.setp(container, hatch='ooo')\n # ax.legend()\n \n plt.axis([0,1.03,-1,len(timeline)])\n plt.xlabel('time (normalized)')\n plt.ylabel('user id')\n plt.legend(loc='upper left')\n plt.tight_layout()\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_detection_and_preemption_timeline.pdf'))\n \ndef plot_labeling_time(df):\n plt.clf()\n plt.scatter(df.num_event, df.labeling_time)\n pars= 
np.polyfit(df.num_event, df.labeling_time, 1)\n fitted_y = np.polyval(pars,df.num_event)\n plt.plot(df.num_event, fitted_y, 'r--')\n plt.xlabel('number of events')\n plt.ylabel('labeling time (s)')\n\n # t = np.array(df.num_event)\n # xn = np.array(df.labeling_time)\n # #Linear regressison -polyfit - polyfit can be used other orders polys\n # (ar,br)=polyfit(t,xn,1)\n # xr=polyval([ar,br],t)\n # #compute the mean square error\n\n # t = (xr-xn)**2\n # n = len(t)\n # err=sqrt(t.sum()/n)\n # print len(t), err\n\n\n # print df.accuracy\n \n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_labeling_time.pdf'))\n \ndef plot_result(result):\n \"\"\"\n result: list of incident_results\n [\n (user, accuraty (0 - not detected or 1 - detected), detection timeliness in second\n ]\n \"\"\"\n plot_detection_accuracy_helper(result)\n plot_detection_cdf(result)\n plot_preemption_cdf(result)\n plot_detection_and_preemption_timeline(result)\n plot_labeling_time(result)\n\n # # detection timeliness\n # detection_timeliness = []\n # for incident in result:\n # for incident_result in incident:\n # timeliness = incident_result[2]\n # if (timeliness > 0):\n # detection_timeliness.append(timeliness) # timeliness\n\n # plot_detection_timeliness(np.array(detection_timeliness, dtype=float))\n # plot_detection_timeliness_hist(np.array(detection_timeliness, dtype=float))\n # plot_detection_timeliness_hist_1hr(np.array([ x < 3600 for x in detection_timeliness], dtype=float))\n\ndef plot_variation(df):\n plt.clf()\n plt.hist(df.accuracy_list[0], color='k')\n plt.xlabel('detect at event number')\n plt.ylabel('count')\n plt.xticks([3, 4, 5, 6]) # TODO FIX THIS\n fig = plt.gcf()\n fig.savefig('{}/{}'.format(BASE_PATH,\n 'graph_variation.pdf'))\n","repo_name":"ncsa/AttackTagger","sub_path":"src/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":9865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"41051139713","text":"import math\nimport torch\nfrom overrides import overrides\nimport torch.distributed as dist\nimport vae_lm.nn.utils as util\nfrom torch_nlp_utils.metrics import Metric\n\n\nclass Average(Metric):\n \"\"\"Simple metric to average results over passed tensor values.\"\"\"\n\n def __init__(self) -> None:\n self._total_value = 0.0\n self._count = 0\n\n @overrides\n def __call__(self, value: torch.Tensor) -> None:\n _total_value = list(util.unwrap_to_tensors(value))[0]\n _count = 1\n if util.dist_available():\n device = util.int_to_device(\n -1 if dist.get_backend() != \"nccl\" else torch.cuda.current_device()\n )\n count = torch.tensor(_count, device=device)\n total_value = torch.tensor(_total_value, device=device)\n # Reduce from all processes\n dist.all_reduce(count, op=dist.ReduceOp.SUM)\n dist.all_reduce(total_value, op=dist.ReduceOp.SUM)\n _count = count.item()\n _total_value = total_value.item()\n self._count += _count\n self._total_value += _total_value\n\n @overrides\n def get_metric(self, reset: bool = False):\n \"\"\"Average of accumulated values.\"\"\"\n average_value = self._total_value / self._count if self._count > 0 else 0.0\n if reset:\n self.reset()\n return float(average_value)\n\n @overrides\n def reset(self):\n self._total_value = 0.0\n self._count = 0\n\n\nclass Perplexity(Average):\n \"\"\"\n Perplexity is a common metric used for evaluating how well a language model\n predicts a sample.\n\n Notes\n -----\n Assumes negative log likelihood loss of each batch (base e). Provides the\n average perplexity of the batches.\n \"\"\"\n\n @overrides\n def get_metric(self, reset: bool = False) -> float:\n \"\"\"The accumulated perplexity.\"\"\"\n average_loss = super().get_metric(reset)\n if average_loss == 0:\n return 0.0\n # Exponentiate the loss to compute perplexity\n return math.exp(average_loss)\n","repo_name":"Nemexur/nonauto-lm","sub_path":"vae_lm/training/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"41389947972","text":"from bd import obtener_conexion\r\n\r\ndef insertar_escuela(nombre,descripcion, estado, nombre1):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"INSERT INTO ESCUELA(idEscuela,nombre,descripcion, estado, idFacultad) VALUES (%s,%s,%s, %s, %s)\",\r\n (obtener_ultimoid(),nombre,descripcion, estado, obtener_idfacultad(nombre1)))\r\n conexion.commit()\r\n conexion.close()\r\n\r\ndef obtener_idfacultad(nombre):\r\n conexion = obtener_conexion()\r\n id=None\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idFacultad from FACULTAD WHERE nombre = %s\",\r\n (nombre))\r\n id = cursor.fetchone()\r\n conexion.close()\r\n return id\r\n\r\n\r\ndef obtener_ultimoid():\r\n conexion = obtener_conexion()\r\n id=None\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT COALESCE((MAX(idEscuela)),0)+1 as idEscuela from ESCUELA\")\r\n id = cursor.fetchone()\r\n conexion.close()\r\n return id\r\n\r\n\r\ndef listarFacultades():\r\n conexion = obtener_conexion()\r\n facultad=[]\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT nombre from FACULTAD\")\r\n facultad = cursor.fetchall()\r\n conexion.close()\r\n return facultad\r\n\r\n\r\ndef obtener_escuela():\r\n conexion = obtener_conexion()\r\n escuela = []\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,e.nombre,e.descripcion,CASE e.estado WHEN 'V' THEN 'Vigente' ELSE 'No vigente' END AS estado, f.nombre FROM ESCUELA e INNER JOIN FACULTAD f on e.idFacultad=f.idFacultad\")\r\n escuela = cursor.fetchall()\r\n print(escuela)\r\n conexion.close()\r\n return escuela\r\n\r\ndef obtener_escuela_index(limit,offset):\r\n conexion=obtener_conexion()\r\n escuela=[]\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,nombre,descripcion, CASE estado WHEN 'V' THEN 'Vigente' ELSE 'No vigente' END AS estado, idFacultad FROM ESCUELA limit {} offset {}\".format(limit, offset))\r\n escuela = cursor.fetchall()\r\n print(escuela)\r\n conexion.close()\r\n return escuela\r\n\r\ndef actualizar_escuela(nombre,descripcion, estado, nombre1,id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"UPDATE ESCUELA SET nombre= %s,descripcion= %s, estado= %s, idFacultad= %s WHERE idEscuela = %s\",\r\n (nombre,descripcion, estado, obtener_idfacultad(nombre1),id))\r\n conexion.commit()\r\n conexion.close()\r\n\r\n\r\ndef dar_baja(id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"UPDATE ESCUELA SET estado='N' WHERE idEscuela = %s\",\r\n (id))\r\n conexion.commit()\r\n conexion.close()\r\ndef dar_alta(id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"UPDATE ESCUELA SET estado='V' WHERE idEscuela = %s\",\r\n (id))\r\n conexion.commit()\r\n conexion.close()\r\ndef buscar_escuela(nombre):\r\n conexion = obtener_conexion()\r\n escuela = []\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,nombre,descripcion, estado, idFacultad FROM ESCUELA WHERE nombre LIKE ('%'||%s||'%')\", (nombre,))\r\n escuela = cursor.fetchall()\r\n conexion.close()\r\n return escuela\r\n\r\ndef buscar_escuela_id(id):\r\n conexion = obtener_conexion()\r\n escuela = []\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"SELECT idEscuela,e.nombre,e.descripcion, e.estado, f.nombre FROM ESCUELA e INNER JOIN FACULTAD f on e.idFacultad=f.idFacultad WHERE idEscuela = %s\", (id,))\r\n escuela = 
cursor.fetchone()\r\n conexion.close()\r\n return escuela\r\n\r\ndef eliminar_escuela(id):\r\n conexion = obtener_conexion()\r\n with conexion.cursor() as cursor:\r\n cursor.execute(\"DELETE FROM ESCUELA WHERE idEscuela = %s\", (id))\r\n conexion.commit()\r\n conexion.close()","repo_name":"BeafLee/PractiSoft","sub_path":"controladores/controlador_escuela.py","file_name":"controlador_escuela.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"11349986226","text":"\n\n# Writte by Trent Balius, FNLCR, April 6, 2021\n\nimport sys \n\ndef main():\n file1 = sys.argv[1]\n file2 = sys.argv[2]\n \n print(\"input = %s\"%(file1))\n print(\"output = %s\"%(file2))\n \n fh1 = open(file1,'r')\n \n dic_id_count = {}\n dic_id_line = {}\n \n \n \n for line in fh1:\n sline = line.split()\n sid = sline[1]\n if sid in dic_id_count:\n dic_id_count[sid]=dic_id_count[sid]+1\n else: \n dic_id_count[sid]=1\n dic_id_line[sid] = line.strip()\n \n \n fh1.close()\n \n fh2 = open(file2,'w')\n for key in sorted(dic_id_line.keys()):\n fh2.write('%s\\n'%(dic_id_line[key]))\n fh2.close()\n \n # print the ids that have more than on entree in the file. \n \n for key in sorted(dic_id_count.keys()):\n if dic_id_count[key] > 1: \n print(key,dic_id_count[key])\n \nmain()\n","repo_name":"tbalius/teb_scripts_programs","sub_path":"zzz.scripts/make_smi_uniq.py","file_name":"make_smi_uniq.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"16469274227","text":"FILE_NAME = \"input.txt\"\nGAMMA = \"GAMMA\"\nEPSILON = \"EPSILON\"\n\ninput = open(FILE_NAME, \"r\")\nVECTOR_SIZE = len(input.readline()) - 1\n\ndef read_input():\n open(FILE_NAME, \"r\")\n return input.readlines()\n\nvalues = read_input()\n\ncounted_values = [0] * VECTOR_SIZE\nXOR_MASK = '1' * VECTOR_SIZE\n\ndef count_bits(value):\n for i in range(0, VECTOR_SIZE):\n if value[i] == '1':\n counted_values[i] = counted_values[i] + 1 \n\ndef calculate_gamma():\n gamma = \"\"\n for i in range (0, VECTOR_SIZE):\n if counted_values[i] >= len(values)/2:\n gamma = gamma + '1'\n else:\n gamma = gamma + '0'\n return gamma\n\ndef calculate_epsilon(gamma):\n return gamma ^ int(XOR_MASK,2)\n\ndef reset_bit_counter():\n for i in range(0, VECTOR_SIZE):\n counted_values[i] = 0\n\ndef count_all_vectors():\n for value in values:\n count_bits(value)\n\ndef eliminate_by_bit_criteria(i, criteria):\n indexes_to_delete = []\n for j in range(0,len(values)):\n if criteria[i] != (values[j])[i]:\n indexes_to_delete.append(j)\n\n for k in range(len(indexes_to_delete)-1, -1, -1):\n if(len(values) == 1):\n break\n del values[indexes_to_delete[k]]\n\ndef recount_and_eliminate(calculate_by):\n while len(values) > 1:\n for i in range(0, VECTOR_SIZE):\n reset_bit_counter()\n count_all_vectors()\n gamma = calculate_gamma()\n if(calculate_by == \"GAMMA\"):\n criteria = gamma\n else:\n epsilon = calculate_epsilon(int(gamma,2))\n criteria = str.zfill(str(bin(epsilon)).split('b')[1], VECTOR_SIZE)\n eliminate_by_bit_criteria(i, criteria)\n\n\ncount_all_vectors()\ngamma = int(calculate_gamma(),2)\nprint(\"gamma \" + str(gamma))\nepsilon = calculate_epsilon(gamma)\nprint(\"epsilon \" + str(epsilon))\n# part one solution\nprint(gamma * epsilon)\n\nvalues = (open(FILE_NAME, \"r\")).readlines()\n\nrecount_and_eliminate(GAMMA)\noxygen = int(values[0],2)\nprint(\"oxy \" + str(oxygen))\n\nvalues = (open(FILE_NAME, \"r\")).readlines()\n\nrecount_and_eliminate(EPSILON)\nco2 = int(values[0], 2)\nprint(\"co2 \" + str(co2))\n\n# part two solution\nprint(oxygen*co2)","repo_name":"antebm/advent-of-code-2021","sub_path":"day03/binary_diagnostic.py","file_name":"binary_diagnostic.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"5467534638","text":"from PIL import Image, ImageDraw, ImageFont\r\nimport os\r\n\r\n# Open the PNG image file\r\nimage = Image.open('image/Participation.png')\r\n\r\n# Create a drawing object\r\ndraw = ImageDraw.Draw(image)\r\n\r\n# Define the font and size for the text\r\nfont = ImageFont.truetype('GreatVibes-Regular.ttf', 400)\r\n# Define the text to write on the image\r\ntext = input('Enter the name: ')\r\n\r\n# Calculate the size of the text\r\ntext_width, text_height = draw.textbbox((0, 0), text, font=font)[2:]\r\n\r\n# Calculate the position of the text\r\nx = (image.width - text_width) / 2\r\ny = 2300\r\n\r\n# Define the color of the text\r\ntext_color = (26, 53, 95)\r\n\r\n# Write the text on the image\r\ndraw.text((x, y), text, font=font, fill=text_color)\r\n\r\n# Save the modified image as a new PNG file with the user-provided name\r\noutput_file = f'image/{text}.png'\r\nimage.save(output_file)\r\n\r\n# Display the paths of the modified and original files\r\nprint(f'Image with text: {output_file}')\r\n\r\n","repo_name":"UsamaAslam711/Certificate-Generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"46611617193","text":"#@profile\ndef decode_event(data,i=0):\n assert list(data[i:i+4])==[0xAA]*4\n iev=sum(b<<(j*8) for j,b in enumerate(data[i+4:i+ 8]))\n tev=sum(b<<(j*8) for j,b in enumerate(data[i+8:i+16]))\n i+=4*4\n hits=[]\n if data[i]&0xF0==0xE0: # chip empty frame\n assert data[i+2]==0xFF\n assert data[i+3]==0xFF\n i+=4\n elif data[i]&0xF0==0xA0: # chip header\n i+=2\n n=len(data)\n reg=None\n while i>9&0x3FE|(d^d>>1)&0x1,d>>1&0x1FF))\n data2=data[i+2]\n d+=1\n while data2:\n if data2&1:\n hits.append((d>>9&0x3FE|(d^d>>1)&0x1,d>>1&0x1FF))\n data2>>=1\n d+=1\n i+=3\n elif data0&0xC0==0x40: # data short\n d=reg<<14|(data0&0x3F)<<8|data[i+1]\n hits.append((d>>9&0x3FE|(d^d>>1)&0x1,d>>1&0x1FF))\n i+=2\n elif data0&0xE0==0xC0: # region header\n reg=data0&0x1F\n i+=1\n elif data0&0xF0==0xB0: # chip trailer\n i+=1\n i=(i+3)//4*4\n break\n elif data0==0xFF:\n i+=1\n else:\n raise ValueError('i=%d'%i)\n else:\n raise ValueError('i=%d'%i)\n assert list(data[i:i+4])==[0xBB]*4\n i+=4\n return hits,iev,tev,i\ndef main(d):\n nev=0\n i=0\n pbar=tqdm(total=len(d))\n while i\n#\n#\tExample:\n#\t\tMergeDataModule.py ..\\..\\Cortex-Command-Community-Project-Release Psyclones.rte\n#\n#\tWarning:\n#\t\tThis priimitive tool simply dumps all the contents of the ini files into single file to speed-up load times. \n#\t\tIt respects IncludeFile statetemnts and removes them from the output file.\n#\t\tIt's intended to use only for mod creators to speed up game loading time when they have to reload the game very often to test stuff.\n#\n\nimport sys\nimport os\n\ndef ParseFile(modpath, inifile, out, indent):\n\tprint(inifile)\n\t\n\tinput = open(os.path.join(modpath, inifile), 'r')\n\tlines = input.readlines()\n\t\n\tcurobject = dict()\n\t\n\tnextcommentmode = False\n\tcommentmode = False\n\t\n\tfor l in lines:\n\t\twriteline = True\n\t\n\t\t#Discard comments\n\t\tcmnts = l.split(\"//\")\n\t\tln = cmnts[0]\n\t\t\n\t\tcmnts = ln.split(\"/*\")\n\t\tln = cmnts[0]\n\t\tif len(cmnts) > 1:\n\t\t\tnextcommentmode = True\n\n\t\tcmnts = ln.split(\"*/\")\n\t\tif len(cmnts) > 1:\n\t\t\tln = cmnts[1]\n\t\t\tnextcommentmode = False\n\t\t\tcommentmode = False\n\n\t\tif not commentmode: \n\t\t\tv = ln.split(\"=\");\n\t\t\tt = list()\n\n\t\t\tfor i in range(0, len(v)):\n\t\t\t\tv[i] = v[i].strip()\n\t\t\t\n\t\t\tif len(v) > 1:\n\t\t\t\tv[0] = v[0].lower()\n\t\t\t\t\n\t\t\t\t#Parse included files\n\t\t\t\tif v[0] == \"includefile\":\n\t\t\t\t\tParseFile(modpath, v[1], out, True)\n\t\t\t\t\twriteline = False\n\n\t\tcommentmode = nextcommentmode\n\t\t\n\t\tif writeline:\n\t\t\tif indent:\n\t\t\t\tout.write(\"\\t\" + l);\n\t\t\telse:\n\t\t\t\tout.write(l);\n\n\tout.write(\"\\n\");\n\tinput.close()\n\nRootFolder = sys.argv[1]\nModName = sys.argv[2]\n\nOutputFile = os.path.join(RootFolder, ModName, \"MergedIndex.ini\");\n\nout = open(OutputFile, 'w')\n\nParseFile(RootFolder, os.path.join(ModName, \"Index.ini\"), out, False)\n\t\nout.close()","repo_name":"weegee720/Cortex-Command-Community-Project-Toolbox","sub_path":"Merger/MergeDataModule.py","file_name":"MergeDataModule.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"1026799185","text":"\"\"\"\r\nСинтаксис\r\n\"\"\"\r\n\r\n# def foo(x):\r\n# return x \r\n\r\n# foo(123)\r\n\r\n# a = 1\r\n# a = 'one'\r\n\r\n# foo = lambda x: x \r\n# foo = 1\r\n\r\n# foo(123)\r\n\r\n\"\"\"\r\nПримеры использования\r\n\"\"\"\r\n\r\n\"\"\"\r\nФункция, которая не храниться в памяти\r\n\"\"\"\r\n\r\n\r\n\r\n# print((lambda x: x + 1)(10))\r\n\r\n# def plusOne(x):\r\n# return x + 1\r\n\r\n# if plusOne(int(input(': '))) == 11:\r\n# print('ты ввел число 10 а я прибавил к нему один ! и получил 11')\r\n\r\n\"\"\"\r\nФункция, аргумент\r\n\"\"\"\r\n\r\ndef foo(num, func):\r\n a = func(num)\r\n return a\r\n\r\n\r\nIsNumberEven = foo(10, lambda x: x ** 2)\r\n\r\nprint(IsNumberEven)\r\n\r\n","repo_name":"gabrielbodrug/python","sub_path":"func/lamdba.py","file_name":"lamdba.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"29085035238","text":"import sys\ninput = sys.stdin.readline\n\ndictionary = {}\nn = int(input())\nfor _ in range(n):\n s = input().rstrip()\n dictionary[s] = len(s)\n\nresult = list(sorted(dictionary.items(),key = lambda item: (item[1],item[0])))\n\nfor i in result:\n print(i[0])","repo_name":"spaceOfSoul/baekjunSolve_python","sub_path":"1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"1366417131","text":"from django.shortcuts import render, get_object_or_404\nfrom customers .models import Customer\nfrom products .models import Product, Category, Colour, Size, Material\nfrom sales .models import Sale, SaleItem\nfrom main .models import Brand\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required\ndef admin_index(request):\n template_name = 'app/index.html'\n context = {\n \"is_admin_index\" : True,\n }\n return render(request,template_name,context)\n\n\ndef customers_view(request):\n customers = Customer.objects.filter(is_active=True) \n template_name = 'app/customers-view.html'\n context = {\n \"is_customers_view\" : True,\n \"customers\" : customers,\n }\n return render(request,template_name,context)\n\n\ndef products_view(request):\n products = Product.objects.filter(is_active=True)\n template_name = 'app/products-view.html'\n context = {\n \"is_products_view\" : True,\n \"products\" : products,\n }\n return render(request,template_name,context)\n\n\ndef sales_list(request):\n sales_list = Sale.objects.filter(is_active=True) \n template_name = 'app/sales-list.html'\n context = {\n \"is_sale_list\" : True,\n \"sales_list\" : sales_list,\n }\n return render((request), template_name, context)\n\ndef category_view(request):\n categories = Category.objects.filter(is_active=True)\n template_name = 'app/category-view.html'\n context = {\n \"is_context_view\" : True,\n \"categories\" : categories,\n }\n return render(request,template_name,context)\n\ndef sale_view(request,pk):\n sale_view = get_object_or_404(Sale,pk=pk)\n template_name = 'app/sales-view.html'\n context = {\n \"is_sale_view\" : True,\n \"sale_view\" : sale_view,\n }\n return render((request), template_name, context)\n\n\ndef brands_view(request):\n brands = Brand.objects.filter(is_active=True)\n template_name = 'app/brands-view.html'\n context = {\n \"is_brands_view\" : True,\n \"brands\" : brands,\n }\n return render(request,template_name,context)\n\n\ndef colors_view(request):\n colors = Colour.objects.filter(is_active=True)\n template_name = 'app/colors-view.html'\n context = {\n \"is_colors_view\" : True,\n \"colors\" : colors,\n }\n return render(request,template_name,context)\n\n\ndef sizes_view(request):\n sizes = Size.objects.filter(is_active=True)\n\n template_name = 'app/size-view.html'\n context = {\n \"is_sizes_view\" : True,\n \"sizes\" : sizes,\n }\n return render(request,template_name,context)\n\n\ndef material_view(request):\n materials = Material.objects.filter(is_active=True)\n\n template_name = 'app/material-view.html'\n context = {\n \"is_material_view\" : True,\n \"materials\" : materials,\n }\n return render(request,template_name,context)\n\n","repo_name":"Ajmal-AJ/ecommerce","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30609420577","text":"import csv\n\n#get user data\ndef retrieve_user_data():\n\tuser_data = []\n\twith open('users.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tuser_data.append(row)\n\tuser_data.pop(0)\n\treturn user_data\n\n#get blockchain data\ndef retrieve_block_data():\n\tblock_data = []\n\twith open('block.csv', 'r') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tblock_data.append(row)\n\treturn block_data\n\n#write new user data into record\ndef write_user_data(s):\n\twith open('users.csv', 'w') as csvfile:\n\t\tfilewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tfor i in s:\n\t\t\tfilewriter.writerow(i)\n\n#write new blockchain data into record\ndef write_block_data(s):\n\twith open('block.csv', 'w') as csvfile:\n\t\tfilewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tfor i in s:\n\t\t\tfilewriter.writerow(i)\n\n#clean all the data in the file\ndef cleandata():\n\twith open('users.csv', 'w') as csvfile:\n\t\tfilewriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tfilewriter.writerow([])\n","repo_name":"kevinyang372/Odera","sub_path":"create_user.py","file_name":"create_user.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"77"}
+{"seq_id":"5743671252","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nprint(\"\\nWelcome to the ultra cool bacterial dynamics simulator \\n\")\n\n# Solving the differential equation\ndef solve_model(rho, S, x_min, x_max, n, D_s, D_b, chi, r, k, lambd, t_c, x_l, q, beta):\n # Defining the step in space and time\n dx = (x_max - x_min)/n\n dt = dx**2 / (64*D_b)\n # print(dt)\n # print(t_c/dt)\n # Defining space\n x = np.linspace(x_min, x_max, n)\n # Array of derivatives\n drhodt = np.empty(n)\n dSdt = np.empty(n)\n # Defining time\n #t = np.arange(0, t_max, dt)\n \n # Array of solutions for density and concentration\n rhos = [rho]\n Ss = [S]\n tot_rho = [np.sum(rho*dx), np.sum(rho*dx)]\n tot_S = [np.sum(S*dx)]\n \n #Dirac delta function\n dirac = np.zeros(n)\n dirac[int(x_l/dx)] = 1\n \n gamma = r/k\n \n # Loop in time\n i = 0\n while i < 2000000:\n # if i % 1000 == 0:\n # print(f\"{i+1}/2000000. Substance is released in i = {int(t_c/dt)}\", end = \"\\r\")\n if np.abs(tot_rho[-1] - tot_rho[-2]) < 1e-7 and dt*i > 1.1*t_c:\n break\n i += 1\n #for i in range(len(t)):\n # Loop in space\n if dt*i < t_c:\n q_eff = 0\n else:\n q_eff = q\n S[n-1] = S[n-2]\n S[0] = S[1]\n rho[0] = D_b*rho[1]/(chi*(S[1] - S[0]) + D_b)\n rho[n-1] = D_b*rho[n-2]/(chi*(S[n-1] - S[n-2]) + D_b)\n for j in range(1,n-1):\n # Substance diffusion and degradetion\n dSdt[j] = D_s*((S[j+1] - S[j])/dx**2 - (S[j]-S[j-1])/dx**2) - lambd*S[j]*rho[j] + q_eff*dirac[j]\n # Chemotaxis\n chem = chi*(((rho[j+1] - rho[j-1])*(S[j+1] - S[j-1])/(4*dx**2)) + rho[j]*((S[j+1] - 2*S[j] + S[j-1])/dx**2))\n # Growth\n growth = r*rho[j]\n # Competition\n competition = gamma*rho[j]**2\n # Death by consumption\n death = lambd*beta*rho[j]*S[j]\n # Bacterial equation\n drhodt[j] = D_b*((rho[j+1] - rho[j])/dx**2 - (rho[j]-rho[j-1])/dx**2) + chem + growth - competition - death\n rho = rho + drhodt*dt\n S = S + dSdt*dt\n\n rhos.append(rho)\n Ss.append(S)\n tot_rho.append(np.sum(rho*dx))\n tot_S.append(np.sum(S*dx))\n return rhos, Ss, tot_rho, tot_S, i, dx\n\nn = 100\ncenter = 0\nsd = 0.4\nr = 0.05\nk = 0.8\nchi = 0.05\ngamma = r/k\nlambd = 0.03\nq = 0.6\nbeta = 0.5\n\nD_b = 1e-2\nD_s = 5e-2\n#t_final = 2200\nt_c = 200\n\n# Initial condition\nS = np.zeros(n)\nrho = np.random.uniform(0.05, 0.1, n)\n\nx_max = np.arange(1, 30, 0.5)\nq_s = np.arange(0.05, 1, 0.05)\n\ntot_rho_final = np.zeros((len(x_max), len(q_s)))\nn = 100\nfinal_profile_rho = {}\nfinal_profile_S = {}\ntot_rho_profile = {}\n\nprint('\\nReady to run simulations. 
Varying L and q \\n')\n\n#rhos, Ss, tot_rho, tot_S, idx, dx = solve_model(rho, S, 0, 10, n, D_s, D_b, chi, r, k, lambd, t_c, 5, 0.6, beta)\n\nfor i in range(len(x_max)):\n for j in range(len(q_s)):\n dx = 0.1\n \n n = int(x_max[i]/dx)\n\n print(f\"Run {i+1} of {len(x_max)} - Size: {x_max[i]} - Total steps: {n}\", end = \"\\r\")\n\n S = np.zeros(n)\n\n dt = dx**2 / (64*D_b)\n\n # Create the space\n x = np.arange(0, x_max[i], dx)\n\n # S[50] = 5\n rho = np.random.uniform(0.05, 0.1, n)\n rhos, Ss, tot_rho, tot_S, idx, dx = solve_model(rho, S, 0, x_max[i], n, D_s, D_b, chi, r, k, lambd, t_c, x_max[i]/2, q_s[j], beta)\n rhos = np.array(rhos)\n Ss = np.array(Ss)\n if (rhos < 0).any() == False:\n tot_rho_final[i,j] = tot_rho[-1]\n final_profile_rho[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = rhos[-1]\n final_profile_S[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = Ss[-1]\n tot_rho_profile[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = tot_rho\n else:\n print(f\"There was an error. x_max = {x_max[i]:1f}\")\n tot_rho_final[i,j] = np.nan\n final_profile_rho[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = np.nan\n final_profile_S[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = np.nan\n tot_rho_profile[f\"{x_max[i]:.2f} - {q_s[j]:.2f}\"] = np.nan\n\nprint(\"Simulations done! \\n\")\n\ntry:\n pd.DataFrame(final_profile_rho).to_csv('final_rho_profile_varying_L-q.csv')\n pd.DataFrame(final_profile_S).to_csv('final_S_profile_varying_L-q.csv')\n pd.DataFrame(tot_rho_profile).to_csv('temporal_N_profile_varying_L-q.csv')\nexcept:\n print('Could not save files as DataFrames, saving using Pickle instead')\n with open('final_rho_profile_varying_L-q.pkl', \"wb\") as fp:\n pickle.dump(final_profile_rho, fp)\n with open('final_S_profile_varying_L-q.pkl', \"wb\") as fp:\n pickle.dump(final_profile_S, fp)\n with open('temporal_N_profile_varying_L-q.pkl', \"wb\") as fp:\n pickle.dump(tot_rho_profile, fp)\n\nnp.savetxt('final_population_varying_L-q.txt', tot_rho_final)\n\nprint('DONE! :)')\n","repo_name":"henauts/QECO","sub_path":"Codes/bacterial_model_v1_verify_L-q.py","file_name":"bacterial_model_v1_verify_L-q.py","file_ext":"py","file_size_in_byte":4984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
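The time step dt = dx**2 / (64*D_b) chosen in solve_model above is tied to the stability of the explicit (forward Euler) diffusion update, which roughly requires dt <= dx**2 / (2*D) for the largest diffusivity in the system:

D_b, D_s = 1e-2, 5e-2
dx = 0.1
dt_used = dx**2 / (64 * D_b)            # 0.015625, the script's time step
dt_limit = dx**2 / (2 * max(D_b, D_s))  # 0.1, the forward-Euler stability bound
print(dt_used < dt_limit)               # True: the update stays stable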
+{"seq_id":"28556872500","text":"from flask import Flask, request, jsonify\nimport pandas as pd\nimport requests\nimport os\nfrom os.path import exists\nimport io\nimport dropbox\nimport joblib\n\napp = Flask(__name__)\n\nmodel_path = 'model'\n\ndbx = dropbox.Dropbox('sl.BN0iICVUKpTKadJ3SPGjAgdxDe1LX0C2yILam1-C-7_yB_84NlvwPRt7GHwSK28bmnnVRzgqDLRILx3mX5B3IgaoE59C9geJoLWvZiUXReD6AazCGfbVtyiE9rbeH0Il2dzwo-E')\n\nif not os.path.isdir(model_path):\n\tos.makedirs(model_path)\n\n\ndef get_pipeline(): # Load pipeline\n\tpath = model_path+'/pipeline.joblib'\n\tif not exists(path):\n\t\tprint(path, 'does not exist')\n\t\tfilename = \"/pipeline.joblib\"\n\t\ts, r = dbx.files_download(filename)\n\t\twith open(model_path+'/pipeline.joblib', 'wb') as f:\n\t\t\tf.write(r.content)\n\tpipeline = joblib.load(model_path+'/pipeline.joblib')\n\treturn pipeline\n\n\npipeline = get_pipeline()\n\n\n@app.route('/api/predict', methods=['POST'])\ndef predict():\n\tdata = request.data\n\tio_val = io.StringIO(data.decode('utf-8'))\n\tdf = pd.read_csv(io_val, index_col=[0])\n\treturn jsonify(\n\t\t{'prediction': pipeline.predict_proba(df)[0].tolist()})\n\n\n@app.route(\"/\")\ndef home_view():\n\treturn \"
Hello World!
\"\n","repo_name":"DamienDous/scoringmodelflaskapi","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"13687079744","text":"import collections\nimport os\nimport random\n\nfrom .utils import filter_dict_by_iqr\n\n\nBASE_DIR = os.path.dirname(__file__)\n\n\nclass Trimmer:\n def __init__(self, path):\n self._datasets_path = path\n self._females = collections.defaultdict(int)\n self._males = collections.defaultdict(int)\n self._load_users_stats()\n\n def remove_outliers(self):\n \"\"\"\n Remove outlier users (users with too many or few tweets).\n \"\"\"\n print('Removing outliers')\n self._females = filter_dict_by_iqr(self._females)\n self._males = filter_dict_by_iqr(self._males)\n\n def split_datasets(self, training_set_percentage):\n \"\"\"\n Splits both datasets (females.tsv and males.tsv) into training and test datasets.\n\n Params:\n training_set_percentage (float): Percentage of tweets that should go to training dataset.\n \"\"\"\n print('Trimming datasets')\n self._split_dataset(self._females, os.path.join(self._datasets_path, 'females.tsv'), training_set_percentage)\n self._split_dataset(self._males, os.path.join(self._datasets_path, 'males.tsv'), training_set_percentage)\n os.remove(os.path.join(self._datasets_path, 'females.tsv'))\n os.remove(os.path.join(self._datasets_path, 'males.tsv'))\n\n def _load_users_stats(self):\n \"\"\"\n Load user stats from generated datasets.\n \"\"\"\n with open(os.path.join(self._datasets_path, 'females.tsv'), 'r', encoding='utf-8') as file:\n for line in file:\n self._females[line.split('\\t')[0]] += 1\n\n with open(os.path.join(self._datasets_path, 'males.tsv'), 'r', encoding='utf-8') as file:\n for line in file:\n self._males[line.split('\\t')[0]] += 1\n\n def _split_dataset(self, users, dataset, training_set_percentage):\n \"\"\"\n Split a dataset into training and test datasets.\n\n Params:\n users (dict): Valid users for the dataset with tweets count as values.\n dataset (string): Path to dataset to split.\n training_set_percentage (float): Percentage of tweets that should go to training dataset.\n \"\"\"\n total_tweets = sum(list(users.values()))\n training_indexes = set(random.sample(range(total_tweets), int(total_tweets * training_set_percentage)))\n\n valid_tweets_processed = 0\n with open(dataset, 'r', encoding='utf-8') as file,\\\n open(dataset.replace('.tsv', '-training.tsv'), 'w+', encoding='utf-8') as training_file,\\\n open(dataset.replace('.tsv', '-test.tsv'), 'w+', encoding='utf-8') as test_file:\n for line in file:\n if line.split('\\t')[0] in users:\n if valid_tweets_processed in training_indexes:\n training_file.write(line)\n else:\n test_file.write(line)\n valid_tweets_processed += 1","repo_name":"davidmogar/lexgen","sub_path":"lexgen/trimmer.py","file_name":"trimmer.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"42978823300","text":"# 作 者 :王建设\n# 开发时间 :2022/8/9 21:01\nimport os\nfilename = 'student.txt'\ndef main():\n while True:\n menu()\n choice = int(input('请选择'))\n if choice in range(8):\n if choice ==0:\n answer = input('您确定要退出系统吗?y/n')\n if answer=='y' or answer =='Y':\n print('谢谢您的使用!!!')\n break\n else:\n continue\n elif choice == 1:\n insert()\n elif choice == 2:\n search()\n elif choice == 3:\n delete()\n elif choice ==4:\n modify()\n elif choice ==5:\n sort()\n elif choice ==6:\n tital()\n elif choice == 7:\n show()\n\ndef menu():\n print(\"==================学生管理系统=================\")\n print('--------------------功能菜单-----------------')\n print('\\t\\t\\t\\t\\t\\t1.录入学生信息')\n print('\\t\\t\\t\\t\\t\\t2.查找学生信息')\n print('\\t\\t\\t\\t\\t\\t3.删除学生信息')\n print('\\t\\t\\t\\t\\t\\t4.修改学生信息')\n print('\\t\\t\\t\\t\\t\\t5.排序')\n print('\\t\\t\\t\\t\\t\\t6.统计学生总人数')\n print('\\t\\t\\t\\t\\t\\t7.显示所有学生信息')\n print('\\t\\t\\t\\t\\t\\t0.退出')\n print('------------------------------------------')\n\ndef insert():\n student_list=[]\n while True:\n id = input('请输入ID(如1001):')\n if not id:\n break\n name = input('请输入姓名')\n if not name:\n break\n\n try:\n english = int(input('请输入英语成绩'))\n python = int(input(\"请输入python成绩\"))\n java = int(input('请输入Java成绩'))\n except:\n print(\"输入无效,请重新输入\")\n continue\n\n #将录入的学生信息保存到字典中\n student = {'id':id,'name':name,'english':english,'python':python,'java':java}\n #将学生信息添加到列表中\n student_list.append(student)\n anwser = input('是否继续添加?y/n\\n')\n if anwser == 'y' or anwser =='Y':\n continue\n else:\n break\n\n #调用save()函数\n save(student_list)\n print('学生信息录入完毕!!!')\n\ndef save(lst):\n try:\n stu_txt = open(filename,'a',encoding ='utf-8')\n except:\n stu_txt = open(filename,'w',encoding ='utf-8')\n for item in lst:\n stu_txt.write(str(item)+'\\n')\n\ndef search():\n student_query =[]\n while True:\n id = ''\n name = ''\n if os.path.exists(filename):\n mode = input('按id查找请输入1,按姓名查找请输入2:')\n if mode == '1':\n id = input('请输入学生id')\n elif mode=='2':\n name = input('请输入学生姓名')\n else:\n print('您输入的信息有误,请重新输入')\n search()\n with open(filename,'r',encoding='utf-8') as rfile:\n student = rfile.readlines()\n for item in student:\n d = dict(eval(item))\n if id !='':\n if d['id'] == id:\n student_query.append(d)\n elif name!='':\n if d['name'] ==name:\n student_query.append(d)\n #显示查询结果\n show_student(student_query)\n #清空列表\n student_query.clear()\n anwser = input('是否继续查询?y/n\\n')\n if anwser == 'y' or anwser == 'Y':\n continue\n else:\n break\n else:\n print('暂未保存学生信息')\n return\ndef show_student(lst):\n if len(lst) == 0:\n print('没有查询到学生信息,无数据显示')\n return\n #定义标题显示格式\n format_title = '{:^6}\\t{:^12}\\t{:^8}\\t{:^10}\\t{:^10}\\t{:^8}'\n print(format_title.format('id','姓名','英语成绩','python成绩','Java成绩','总成绩'))\n #定义内容的显示格式\n format_data = '{:^6}\\t{:^12}\\t{:^8}\\t{:^10}\\t{:^10}\\t{:^8}'\n for item in lst:\n print(format_data.format(item.get('id'),\n item.get('name'),\n item.get('english'),\n item.get('python'),\n item.get('java'),\n int(item.get('english'))+int(item.get('python'))+int(item.get('java'))\n ))\n\n\ndef delete():\n while True:\n student_id = input('请输入要删除的学生id:')\n if student_id !='':\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as file:\n student_old = file.readlines()\n else:\n student_old = []\n flag = False #标记是否删除\n if student_old:\n with open(filename ,'w',encoding='utf-8') as wfile:\n d={}\n for item in student_old:\n d= dict(eval(item))\n if d['id']!=student_id:\n wfile.write(str(d)+'\\n')\n else:\n flag = True\n if 
flag:\n print(f'id为{student_id}的学生信息已被删除')\n else:\n print(f'没有找到id为{student_id}的学生信息')\n else:\n print('无学生信息')\n break\n show() #删除之后要重新显示所有学生信息\n answer = input('是否继续删除?y/n\\n')\n if answer == 'y' or answer == 'Y':\n continue\n else:\n break\n\ndef modify():\n show()\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n student_old = rfile.readlines()\n else:\n return\n student_id = input('请输入要修改的学生id')\n with open(filename,'w',encoding='utf8') as wfile:\n for item in student_old:\n d=dict(eval(item))\n if d['id'] == student_id:\n print(\"找到学生信息,可以修改他的相关信息了!\")\n while True:\n try:\n d['name'] = input('请输入姓名:')\n d['english'] = input('请输入英语成绩:')\n d['python'] = input('请输入python成绩:')\n d['java'] = input('请输入java成绩:')\n except:\n print('您的输入有误!!!')\n else:\n break\n wfile.write(str(d)+'\\n')\n print('修改成功!!!')\n else:\n wfile.write(str(d)+'\\n')\n answer = input('是否继续修改?y/n\\n')\n if answer == 'y' or answer == 'Y':\n modify()\n\ndef sort():\n show()\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n student_list = rfile.readlines()\n student_new = []\n for item in student_list:\n d = dict(eval(item))\n student_new.append(d)\n else:\n return\n asc_or_desc = input('请选择(0.升序 1.降序)')\n if asc_or_desc == '0':\n asc_or_desc_bool =False\n elif asc_or_desc == '1':\n asc_or_desc_bool = True\n else:\n print('您输入的有误,请重新输入')\n sort()\n mode = input('请选择排序方法:1.按英语成绩排序;2.按python成绩排序;3.按java成绩排序 0.按总成绩排序')\n if mode == '1':\n student_new.sort(key=lambda x :int(x['english']),reverse = asc_or_desc_bool)\n elif mode == '2':\n student_new.sort(key=lambda x :int(x['python']),reverse = asc_or_desc_bool)\n elif mode =='3':\n student_new.sort(key=lambda x :int(x['java']),reverse = asc_or_desc_bool)\n elif mode == '0':\n student_new.sort(key=lambda x :int(x['english'])+int(x['python'])+int(x['java']),reverse = asc_or_desc_bool)\n else:\n print('您的输入有误,请重新输入!!!')\n sort()\n show_student(student_new)\ndef tital():\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n students = rfile.readlines()\n if students:\n print(f'一共有{len(students)}名学生')\n else:\n print('还没有录入学生信息')\n else:\n print('暂未保存数据信息')\n\ndef show():\n student_list = []\n if os.path.exists(filename):\n with open(filename,'r',encoding='utf-8') as rfile:\n students = rfile.readlines()\n for item in students:\n student_list.append(eval(item))\n if student_list:\n show_student(student_list)\n else:\n print('暂未保存数据信息!!!')\n\nmain()\n","repo_name":"Jianjianw6/python_studentsysytem","sub_path":"studentsystem/stusystem.py","file_name":"stusystem.py","file_ext":"py","file_size_in_byte":9214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"671641169","text":"import socket\nfrom message import Message\n\n\nclass ThrustMQProducer:\n\n def __init__(self, host=\"localhost\", port=1888):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((host, port))\n\n def send_message(self, message):\n self.sock.sendall(message.bucket_id.to_bytes(8, byteorder='little'))\n self.sock.sendall(message.length.to_bytes(4, byteorder='little'))\n self.sock.sendall(message.data)\n\n def send(self, messages):\n if not isinstance(messages, list):\n messages = [messages]\n\n batch_size = len(messages)\n\n self.sock.sendall(batch_size.to_bytes(4, byteorder='little'))\n\n for message in messages:\n self.send_message(message)\n\n result = self.sock.recv(message.length)\n","repo_name":"rambler-digital-solutions/thrustmq","sub_path":"clients/python/thrustmq/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"16608721026","text":"from adoctor_cli.base_cmd import BaseCommand\nfrom aops_utils.restful.helper import make_check_url\nfrom aops_utils.conf.constant import CHECK_GET_RESULT\nfrom aops_utils.time_utils import time_transfer\nfrom aops_utils.validate import name_check, str_split\nfrom aops_utils.cli_utils import add_page, add_access_token, add_query_args\nfrom aops_utils.cli_utils import add_start_and_end, request_without_print, pretty_json\n\n\nclass CheckCommand(BaseCommand):\n \"\"\"\n Description: start the check part\n Attributes:\n sub_parse: Subcommand parameters\n params: Command line parameters\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Description: Instance initialization\n \"\"\"\n super().__init__()\n self.add_subcommand(sub_command='check',\n help_desc=\"check operations\")\n self.sub_parse.add_argument(\n '--host_list',\n nargs='?',\n type=str,\n help='host ips')\n\n self.sub_parse.add_argument(\n '--check_items',\n nargs='?',\n type=str,\n help='ckeck items')\n\n add_start_and_end(self.sub_parse)\n add_access_token(self.sub_parse)\n add_query_args(self.sub_parse, ['check_item', 'start', 'end'])\n add_page(self.sub_parse)\n\n def do_command(self, params):\n \"\"\"\n Description: Executing command\n Args:\n params: Command line parameters\n \"\"\"\n self.manage_requests_check(params)\n\n @staticmethod\n def manage_requests_check(params):\n \"\"\"\n Description: Executing check command\n Args:\n params: Command line parameters\n Returns:\n dict: body of response\n \"\"\"\n\n hosts = str_split(params.host_list)\n checks = str_split(params.check_items)\n name_check(hosts)\n name_check(checks)\n time_list = time_transfer(params.start, params.end)\n\n pyload = {\n \"time_range\": [time_list[0], time_list[1]],\n \"check_items\": checks,\n \"host_list\": hosts,\n \"page\": params.page,\n \"per_page\": params.per_page\n }\n if params.sort is not None:\n pyload['sort'] = params.sort\n pyload['direction'] = params.direction\n check_url, header = make_check_url(CHECK_GET_RESULT)\n result = request_without_print('POST', check_url, pyload, header, params.access_token)\n print(pretty_json(result))\n","repo_name":"openeuler-mirror/A-Ops","sub_path":"adoctor-cli/adoctor_cli/commands/check_cmd.py","file_name":"check_cmd.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"25136034471","text":"# imports\nimport torch\nfrom transformers import AutoTokenizer\nfrom trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model\nfrom trl.core import respond_to_batch\n\n# get models\nmodel = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')\nmodel_ref = create_reference_model(model)\n\ntokenizer = AutoTokenizer.from_pretrained('gpt2')\ntokenizer.pad_token = tokenizer.eos_token\n\n# initialize trainer\nppo_config = PPOConfig(\n batch_size=1,\n)\n\n# encode a query\nquery_txt = \"This morning I went to the \"\nquery_tensor = tokenizer.encode(query_txt, return_tensors=\"pt\")\n\n# get model response\nresponse_tensor = respond_to_batch(model, query_tensor)\n\n# create a ppo trainer\nppo_trainer = PPOTrainer(ppo_config, model, model_ref, tokenizer)\n\n# define a reward for response\n# (this could be any reward such as human feedback or output from another model)\nreward = [torch.tensor(1.0)]\n\n# train model for one step with ppo\ntrain_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward)\n\nfrom pprint import pprint\npprint(train_stats)\n\n\n","repo_name":"corradomio/python_projects","sub_path":"check_hf_llma/check_trl.py","file_name":"check_trl.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"8155116266","text":"import re\nimport hashlib\nimport inspect\nimport collections\n\nfrom base64 import b64decode\n\nimport idiokit\nfrom idiokit.xmlcore import Element, Elements\n\n\ndef _replace_non_xml_chars(unicode_obj, replacement=u\"\\ufffd\"):\n return _NON_XML.sub(replacement, unicode_obj)\n\n\n_NON_XML = re.compile(u\"[\\x00-\\x08\\x0b\\x0c\\x0e-\\x1f\\ud800-\\udfff\\ufffe\\uffff]\", re.U)\n\n\ndef _normalize(value):\n \"\"\"Return the value converted to unicode. Raise a TypeError if the\n value is not a string.\n\n >>> _normalize(\"a\")\n u'a'\n >>> _normalize(u\"b\")\n u'b'\n >>> _normalize(1)\n Traceback (most recent call last):\n ...\n TypeError: expected a string value, got the value 1 of type int\n\n When converting str objects the default encoding is tried, and an\n UnicodeDecodeError is raised if the value can not bot converted.\n\n >>> _normalize(\"\\\\xe4\")\n Traceback (most recent call last):\n ...\n UnicodeDecodeError: ...\n \"\"\"\n\n if isinstance(value, basestring):\n return unicode(value)\n\n name = type(value).__name__\n module = inspect.getmodule(value)\n if module is not None and module.__name__ != \"__builtin__\":\n name = module.__name__ + \".\" + name\n msg = \"expected a string value, got the value %r of type %s\" % (value, name)\n raise TypeError(msg)\n\n\nEVENT_NS = \"abusehelper#event\"\n\n\ndef _unicode_quote(string):\n r\"\"\"\n >>> _unicode_quote(u\"a\")\n u'a'\n >>> _unicode_quote(u\"=\")\n u'\"=\"'\n >>> _unicode_quote(u\"\\n\")\n u'\"\\n\"'\n \"\"\"\n\n if _UNICODE_QUOTE_CHECK.search(string):\n return u'\"' + _UNICODE_QUOTE.sub(r'\\\\\\g<0>', string) + u'\"'\n return string\n\n\n_UNICODE_QUOTE_CHECK = re.compile(r'[\\s\"\\\\,=]', re.U)\n_UNICODE_QUOTE = re.compile(r'[\"\\\\]', re.U)\n\n\ndef _unicode_parse_part(string, start):\n match = _UNICODE_PART.match(string, start)\n quoted, unquoted = match.groups()\n end = match.end()\n\n if quoted is not None:\n return _UNICODE_UNQUOTE.sub(\"\\\\1\", quoted), end\n if unquoted is not None:\n return unquoted, end\n return u\"\", end\n\n\n_UNICODE_UNQUOTE = re.compile(r'\\\\(.)', re.U)\n_UNICODE_PART = re.compile(r'\\s*(?:(?:\"((?:\\\\.|[^\"])*)\")|([^\\s\"=,]+)|)\\s*', re.U)\n\n\nclass Event(object):\n __slots__ = [\"_attrs\"]\n\n _UNDEFINED = object()\n\n @classmethod\n def _itemize(cls, *args, **keys):\n result = dict()\n\n for obj in args + (keys,):\n if type(obj) == Event:\n for key, values in obj._attrs.iteritems():\n if key not in result:\n result[key] = values.copy()\n else:\n result[key].update(values)\n continue\n\n if hasattr(obj, \"iteritems\"):\n obj = obj.iteritems()\n elif hasattr(obj, \"items\"):\n obj = obj.items()\n\n for key, values in obj:\n if isinstance(values, basestring):\n values = (_normalize(values),)\n else:\n values = (_normalize(x) for x in values)\n\n key = _normalize(key)\n if key not in result:\n result[key] = set(values)\n else:\n result[key].update(values)\n\n return result\n\n @classmethod\n def from_unicode(cls, string):\n r\"\"\"\n >>> event = Event({\"a\": \"b\"})\n >>> Event.from_unicode(unicode(event)) == event\n True\n\n >>> event = event.union({u'=': u'\"'})\n >>> Event.from_unicode(unicode(event)) == event\n True\n\n Regression test: Check that character escaping\n doesn't mess up parsing.\n\n >>> event = Event({\n ... u\"x\": u\"\\\\\",\n ... u\"y\": u\"b\"\n ... 
})\n >>> Event.from_unicode(ur'x=\"\\\\\", \"y\"=b') == event\n True\n \"\"\"\n\n string = string.strip()\n if not string:\n return cls()\n\n attrs = collections.defaultdict(list)\n\n index = 0\n length = len(string)\n while True:\n key, index = _unicode_parse_part(string, index)\n if index >= length:\n raise ValueError(\"unexpected string end\")\n if string[index] != u\"=\":\n raise ValueError(\"unexpected character %r at index %d\" %\n (string[index], index))\n index += 1\n\n value, index = _unicode_parse_part(string, index)\n attrs[key].append(value)\n\n if index >= length:\n return cls(attrs)\n\n if string[index] != u\",\":\n raise ValueError(\"unexpected character %r at index %d\" %\n (string[index], index))\n index += 1\n\n @classmethod\n def from_elements(self, elements):\n \"\"\"Yield events parsed from XML element(s).\n\n >>> element = Element(\"message\")\n >>> list(Event.from_elements(element))\n []\n >>> element.add(Element(\"event\", xmlns=EVENT_NS))\n >>> list(Event.from_elements(element)) == [Event()]\n True\n\n >>> event = Event({u\"\\\\uffff\": u\"\\\\x05\"}) # include some forbidden XML chars\n >>> element = Element(\"message\")\n >>> element.add(event.to_elements())\n >>> list(Event.from_elements(element)) == [Event({u\"\\\\ufffd\": u\"\\\\ufffd\"})]\n True\n \"\"\"\n\n # Future event format\n for event_element in elements.children(\"e\", EVENT_NS):\n attrs = collections.defaultdict(list)\n for key_element in event_element.children(\"k\").with_attrs(\"a\"):\n key = b64decode(key_element.get_attr(\"a\")).decode(\"utf-8\")\n for value_element in key_element.children(\"v\").with_attrs(\"a\"):\n value = b64decode(value_element.get_attr(\"a\")).decode(\"utf-8\")\n attrs[key].append(value)\n yield Event(attrs)\n\n # Legacy event format\n for event_element in elements.children(\"event\", EVENT_NS):\n attrs = collections.defaultdict(list)\n for attr in event_element.children(\"attr\").with_attrs(\"key\", \"value\"):\n key = attr.get_attr(\"key\")\n value = attr.get_attr(\"value\")\n attrs[key].append(value)\n yield Event(attrs)\n\n def __init__(self, *args, **keys):\n \"\"\"\n Regression test: Keep the the correct internal encoding in the\n copy/merge constructor.\n\n >>> event = Event({u\"\\xe4\": u\"\\xe4\"})\n >>> Event(event).items()\n ((u'\\\\xe4', u'\\\\xe4'),)\n \"\"\"\n\n self._attrs = self._itemize(*args, **keys)\n\n def union(self, *args, **keys):\n \"\"\"Return a new event that contains all key-value pairs from\n appearing in the original event and/or Event(*args, **keys).\n\n >>> sorted(Event(a=[\"1\", \"2\"]).union(a=[\"1\", \"3\"]).items())\n [(u'a', u'1'), (u'a', u'2'), (u'a', u'3')]\n \"\"\"\n\n return type(self)(self, *args, **keys)\n\n def difference(self, *args, **keys):\n \"\"\"Return a new event that contains all key-value pairs\n from the original event except those also appearing in\n Event(*args, **keys).\n\n >>> sorted(Event(a=[\"1\", \"2\"]).difference(a=[\"1\", \"3\"]).items())\n [(u'a', u'2')]\n \"\"\"\n\n other = self._itemize(*args, **keys)\n result = dict()\n for key, values in self._attrs.iteritems():\n diff = values.difference(other.get(key, ()))\n if diff:\n result[key] = diff\n return type(self)(result)\n\n def add(self, key, value, *values):\n \"\"\"Add value(s) for a key.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\")\n >>> event.values(\"key\")\n (u'1',)\n\n More than one value can be added with one call.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\", \"2\")\n >>> sorted(event.values(\"key\"))\n [u'1', u'2']\n\n Key-value 
pairs already contained by the event are ignored.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\")\n >>> event.values(\"key\")\n (u'1',)\n >>> event.add(\"key\", \"1\")\n >>> event.values(\"key\")\n (u'1',)\n \"\"\"\n\n self.update(key, (value,) + values)\n\n def update(self, key, values):\n \"\"\"Update the values of a key.\n\n >>> event = Event()\n >>> event.update(\"key\", [\"1\", \"2\"])\n >>> sorted(event.values(\"key\"))\n [u'1', u'2']\n\n The event will not be modified if there are no values to add.\n\n >>> event = Event()\n >>> event.update(\"key\", [])\n >>> event.contains(\"key\")\n False\n \"\"\"\n\n key = _normalize(key)\n if key not in self._attrs:\n self._attrs[key] = set()\n self._attrs[key].update(_normalize(value) for value in values)\n\n def discard(self, key, value, *values):\n \"\"\"Discard some value(s) of a key.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\", \"2\", \"3\")\n >>> event.discard(\"key\", \"1\", \"3\")\n >>> event.values(\"key\")\n (u'2',)\n\n Values that don't exist for the given key are silently ignored.\n\n >>> event = Event()\n >>> event.add(\"key\", \"2\")\n >>> event.discard(\"key\", \"1\", \"2\")\n >>> event.values(\"key\")\n ()\n \"\"\"\n\n key = _normalize(key)\n if key not in self._attrs:\n return\n valueset = self._attrs[key]\n valueset.difference_update(_normalize(value) for value in (value,) + values)\n if not valueset:\n del self._attrs[key]\n\n def clear(self, key):\n \"\"\"Clear all values of a key.\n\n >>> event = Event()\n >>> event.add(\"key\", \"1\")\n >>> event.clear(\"key\")\n >>> event.contains(\"key\")\n False\n\n Clearing keys that do not exist does nothing.\n\n >>> event = Event()\n >>> event.clear(\"key\")\n \"\"\"\n\n key = _normalize(key)\n self._attrs.pop(key, None)\n\n def _unkeyed(self):\n for values in self._attrs.itervalues():\n for value in values:\n yield value\n\n def _iter(self, key, parser, filter):\n if key is self._UNDEFINED:\n values = set(self._unkeyed())\n else:\n key = _normalize(key)\n values = self._attrs.get(key, ())\n\n if parser is not None:\n parsed = (parser(x) for x in values)\n\n if filter is not None:\n return (x for x in parsed if filter(x))\n else:\n return (x for x in parsed if x is not None)\n\n if filter is not None:\n return (x for x in values if filter(x))\n\n return values\n\n def pop(self, key, parser=None, filter=None):\n \"\"\"Pop value(s) of a key and clear them.\n >>> event = Event()\n >>> event.add(\"key\", \"y\", \"x\", \"1.2.3.4\")\n >>> sorted(event.pop(\"key\"))\n [u'1.2.3.4', u'x', u'y']\n >>> event.contains(\"key\")\n False\n\n Perform parsing, validation and filtering by passing in\n parsing and filtering functions. Only values that match\n are cleared from the event. Values that do not match\n are preserved.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... 
return None\n >>> event = Event()\n >>> event.add(\"key\", \"1\", \"a\")\n >>> sorted(event.pop(\"key\", parser=int_parse))\n [1]\n >>> sorted(event.values(\"key\"))\n [u'a']\n \"\"\"\n\n key = _normalize(key)\n values = tuple(self._attrs.get(key, ()))\n\n if parser is not None:\n parsed = ((parser(x), x) for x in values)\n else:\n parsed = ((x, x) for x in values)\n\n if filter is not None:\n filtered = ((x, y) for (x, y) in parsed if filter(x))\n else:\n filtered = ((x, y) for (x, y) in parsed if x is not None)\n\n results = []\n\n for x, y in filtered:\n self.discard(key, y)\n results.append(x)\n\n return tuple(results)\n\n def values(self, key=_UNDEFINED, parser=None, filter=None):\n \"\"\"Return a tuple of event values (for a specific key, if\n given).\n\n >>> event = Event(key=[\"1\", \"2\"], other=[\"3\", \"4\"])\n >>> sorted(event.values())\n [u'1', u'2', u'3', u'4']\n >>> sorted(event.values(\"key\"))\n [u'1', u'2']\n\n Perform parsing, validation and filtering by passing in\n parsing and filtering functions (by default all None objects\n are filtered when a parsing function has been given).\n\n >>> import socket\n >>> def ipv4(string):\n ... try:\n ... return socket.inet_ntoa(socket.inet_aton(string))\n ... except socket.error:\n ... return None\n >>> event = Event(key=[\"1.2.3.4\", \"abba\"], other=\"10.10.10.10\")\n >>> event.values(\"key\", parser=ipv4)\n ('1.2.3.4',)\n >>> sorted(event.values(parser=ipv4))\n ['1.2.3.4', '10.10.10.10']\n \"\"\"\n\n return tuple(self._iter(key, parser, filter))\n\n def value(self, key=_UNDEFINED, default=_UNDEFINED,\n parser=None, filter=None):\n \"\"\"Return one event value (for a specific key, if given).\n\n The value can be picked either from the values of some\n specific key or amongst event values.\n\n >>> event = Event(key=\"1\", other=\"2\")\n >>> event.value(\"key\")\n u'1'\n >>> event.value() in [u\"1\", u\"2\"]\n True\n\n A default return value can be defined in case no suitable\n value is available:\n\n >>> event = Event()\n >>> event.value(\"key\", \"default value\")\n 'default value'\n >>> event.value(default=\"default value\")\n 'default value'\n\n KeyError is raised if no suitable values are available and no\n default is given.\n\n >>> event = Event()\n >>> event.value()\n Traceback (most recent call last):\n ...\n KeyError: 'no value available'\n >>> event.value(\"somekey\")\n Traceback (most recent call last):\n ...\n KeyError: 'somekey'\n\n As with .values(...), parsing and filtering functions can be\n given, and they will be used to modify the results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... 
return None\n >>> event = Event(key=[\"1\", \"a\"])\n >>> event.value(parser=int_parse)\n 1\n >>> event.value(\"key\", parser=int_parse)\n 1\n >>> event.value(\"other\", parser=int_parse)\n Traceback (most recent call last):\n ...\n KeyError: 'other'\n \"\"\"\n\n for value in self._iter(key, parser, filter):\n return value\n\n if default is self._UNDEFINED:\n if key is self._UNDEFINED:\n raise KeyError(\"no value available\")\n raise KeyError(key)\n return default\n\n def contains(self, key=_UNDEFINED, value=_UNDEFINED,\n parser=None, filter=None):\n \"\"\"Return whether the event contains a key-value pair (for\n specific key and/or value, if given).\n\n >>> event = Event()\n >>> event.contains() # Does the event contain any values at all?\n False\n\n >>> event = event.union(key=\"1\")\n >>> event.contains()\n True\n >>> event.contains(\"key\") # Any value for key \"key\"?\n True\n >>> event.contains(value=\"1\") # Value \"1\" for any key?\n True\n >>> event.contains(\"key\", \"1\") # Value \"1\" for key \"key\"?\n True\n >>> event.contains(\"other\", \"2\") # Value \"2\" for key \"other\"?\n False\n\n Parsing and filtering functions can be given to modify the results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... return None\n >>> event.contains(parser=int_parse) # Any int value for any key?\n True\n >>> event.contains(\"key\", parser=int_parse) # Any int value for \"key\"?\n True\n >>> event.contains(value=1, parser=int_parse) # Value 1 for any key?\n True\n >>> event = event.union(other=\"x\")\n >>> event.contains(\"other\", parser=int_parse)\n False\n \"\"\"\n\n if key is self._UNDEFINED:\n values = set(self._unkeyed())\n else:\n key = _normalize(key)\n values = self._attrs.get(key, ())\n\n if parser is not None:\n parsed = (parser(x) for x in values)\n\n if filter is not None:\n filtered = (x for x in parsed if filter(x))\n else:\n filtered = (x for x in parsed if x is not None)\n elif filter is not None:\n filtered = (x for x in values if filter(x))\n else:\n filtered = values\n\n for filtered_value in filtered:\n if value is self._UNDEFINED or value == filtered_value:\n return True\n return False\n\n def items(self, parser=None, filter=None):\n \"\"\"Return a tuple of key-value pairs contained by the event.\n\n >>> event = Event()\n >>> event.items()\n ()\n >>> event = event.union(key=\"1\", other=[\"x\", \"y\"])\n >>> sorted(event.items())\n [(u'key', u'1'), (u'other', u'x'), (u'other', u'y')]\n\n Parsing and filtering functions can be given to modify the results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... return None\n >>> event.items(parser=int_parse)\n ((u'key', 1),)\n\n The order of the key-value pairs is undefined.\n \"\"\"\n\n result = list()\n\n for key, values in self._attrs.iteritems():\n for value in values:\n if parser is not None:\n value = parser(value)\n if filter is not None and not filter(value):\n continue\n if filter is None and value is None:\n continue\n result.append((key, value))\n\n return tuple(result)\n\n def keys(self, parser=None, filter=None):\n \"\"\"Return a tuple of keys with at least one value.\n\n >>> event = Event()\n >>> event.keys()\n ()\n >>> event = event.union(key=\"1\", other=[\"x\", \"y\"])\n >>> sorted(event.keys())\n [u'key', u'other']\n\n Parsing and filtering functions can be given to modify the\n results.\n\n >>> def int_parse(string):\n ... try:\n ... return int(string)\n ... except ValueError:\n ... 
return None\n >>> sorted(event.keys(parser=int_parse))\n [u'key']\n \"\"\"\n\n return tuple(key for key in self._attrs\n if self.contains(key, parser=parser, filter=filter))\n\n def to_elements(self, include_body=True):\n element = Element(\"event\", xmlns=EVENT_NS)\n\n for key, value in self.items():\n key = _replace_non_xml_chars(key)\n value = _replace_non_xml_chars(value)\n attr = Element(\"attr\", key=key, value=value)\n element.add(attr)\n\n if not include_body:\n return element\n\n body = Element(\"body\")\n body.text = _replace_non_xml_chars(unicode(self))\n return Elements(body, element)\n\n def __reduce__(self):\n return self.__class__, (self._attrs,)\n\n def __eq__(self, other):\n if not isinstance(other, Event):\n return NotImplemented\n return other._attrs == self._attrs\n\n def __ne__(self, other):\n value = self.__eq__(other)\n if value is NotImplemented:\n return NotImplemented\n return not value\n\n def __unicode__(self):\n \"\"\"Return an unicode representation of the event.\n\n >>> unicode(Event())\n u''\n >>> unicode(Event({\"a,\": \"b\"}))\n u'\"a,\"=b'\n\n The specific order of the key-value pairs is undefined.\n \"\"\"\n\n return u\", \".join(_unicode_quote(key) + u\"=\" + _unicode_quote(value)\n for (key, value) in self.items())\n\n def __repr__(self):\n attrs = dict()\n for key, value in self.items():\n attrs.setdefault(key, list()).append(value)\n return self.__class__.__name__ + \"(\" + repr(attrs) + \")\"\n\n\ndef hexdigest(event, func=hashlib.sha1):\n \"\"\"Return a hexadecimal digest string created by from the given event's\n key-value pairs.\n\n The result is guaranteed to be the same for two events e1 and e2 when\n e1 == e2. Key-value insertion order does not affect the result.\n\n >>> e1 = Event()\n >>> e1.add(\"a\", \"b\")\n >>> e1.add(\"x\", \"y\")\n >>>\n >>> e2 = Event()\n >>> e2.add(\"x\", \"y\")\n >>> e2.add(\"a\", \"b\")\n >>>\n >>> hexdigest(e1) == hexdigest(e2)\n True\n\n The result is not guaranteed to be different for two events e1 and e2\n when e1 != e2. However such a collision is usually exceedingly unlikely\n when a good hashing algorithm is used. SHA1 is the default, but can be\n changed by passing in an algorithm implementation with a compatible\n interface. For example, algorithms defined in the standard 'hashlib'\n library are compatible.\n\n >>> import hashlib\n >>> hexdigest(Event(a=\"b\"), hashlib.md5)\n '51a8ca876645d37e29419694f6396fbc'\n\n The default hashing algorithm is NOT guaranteed to be SHA1 forever. If you\n want to guarantee that the hexdigest is always created using e.g. SHA1,\n pass the hash function explicitly as the second parameter:\n\n >>> import hashlib\n >>> hexdigest(Event(a=\"b\"), hashlib.sha1)\n 'edf6294fc1d3f9fe8be4a2d5626788bcfde05e62'\n \"\"\"\n\n result = func()\n\n for key, value in sorted(event.items()):\n result.update(key.encode(\"utf-8\"))\n result.update(\"\\xc0\")\n result.update(value.encode(\"utf-8\"))\n result.update(\"\\xc0\")\n\n return result.hexdigest()\n\n\ndef stanzas_to_events():\n return idiokit.map(Event.from_elements)\n\n\ndef events_to_elements():\n return idiokit.map(lambda x: (x.to_elements(),))\n","repo_name":"abusesa/abusehelper","sub_path":"abusehelper/core/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":22236,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"77"}
+{"seq_id":"21900236162","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport TestModel\npretrained_model = \"lenet_mnist_model.pth\"\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n# 初始化输入数据并赋值\ndef getInitModel():\n model = Net()\n model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))\n model.eval()\n print(model)\n return model","repo_name":"tokisamu/AdversarialExampleGennerator","sub_path":"TestModel.py","file_name":"TestModel.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"73285180727","text":"# Classes to handle asynchronous downloads\n\nfrom threading import Thread\n\nfrom requests import exceptions, get\n\n\nclass DownloadCommand:\n def __init__(self, url, callback, *args, **kwargs):\n self.callback = callback\n self.args = args\n self.kwargs = kwargs\n self.timeout = 5\n self.timestamp = None\n self.error = None\n self.url = url\n self.response = None\n\n\nclass AsyncDownloadService:\n def execute(self, command, response_handler):\n def _callback_with_args(response, **kwargs):\n command.response = response\n response_handler(command)\n\n kwargs = {\"command\": command, \"callback\": _callback_with_args}\n\n thread = Thread(target=AsyncDownloadService.download, kwargs=kwargs)\n thread.start()\n\n @staticmethod\n def download(command, callback):\n kwargs = {\"timeout\": command.timeout, \"hooks\": {\"response\": callback}}\n\n try:\n get(command.url, **kwargs)\n except exceptions.RequestException as e:\n command.error = \"Connection error \" + str(e)\n callback(None)\n\n\nclass DownloadService:\n def execute(self, command, response_handler):\n try:\n command.response = get(command.url, timeout=command.timeout)\n response_handler(command)\n except exceptions.RequestException as e:\n command.error = \"Connection error \" + str(e)\n","repo_name":"bluppfisk/coindicator","sub_path":"src/coin/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":152,"dataset":"github-code","pt":"77"}
+{"seq_id":"39287263673","text":"import RPi.GIPO as GPIO\nimport time\nimport pyautogui as pag\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport spiUtils as su\n\nGPIO.setmode(GPIO.BCM)\n\nclass Button:\n def __init__(self, pinNum, keyToPress):\n self.pin = pinNum\n self.key = keyToPress\n GPIO.setup(pinNum, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(pinNUM, GPIO.RISING, callback=pushButton)\n \n def pushButton(self, channel):\n pag.press(key)\n \n# setup buttons\n # A button (pin 24)\naPin = 24\naButton = 'a'\n#GPIO.setup(aPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n # B button (pin 25)\nbPin = 25\nbButton = 'b'\n#GPIO.setup(bPin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\na = Button(aPin, aButton)\nb = Button(bPin, bButton)\n\n# setup potentiometer\npot = su.readADC(channel=1)\n\ni=0\nfor i in range(0, 1000):\n # fits potentiometer into range (-1 <-> 1)\n plusMinusValue = (su.readADC(channel=1)/511.5)-1\n print(plusMinusValue)\n time.sleep(.1)\n i=i+1\n pag.move(200*plusMinusValue, 0)\n\n","repo_name":"benbrokaw/Capstone","sub_path":"capstone.py","file_name":"capstone.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"22543791872","text":"from contextlib import contextmanager\nimport os\nimport sqlite3\nfrom typing import Generator\n\nimport pytest\n\nfrom app.cache.persistent_cache import PersistentCache\nfrom app.items import Item\n\n\n@contextmanager\ndef open_cursor() -> Generator[sqlite3.Cursor, None, None]:\n connection = sqlite3.connect(\"test.db\")\n cursor = connection.cursor()\n yield cursor\n connection.commit()\n cursor.close()\n connection.close()\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef verify_cache_removed():\n db_name = \"test.db\"\n if os.path.exists(db_name):\n os.remove(db_name)\n yield\n if os.path.exists(db_name):\n os.remove(db_name)\n\n\n@pytest.fixture(scope=\"function\")\ndef cache():\n db_name = \"test.db\"\n cache = PersistentCache(db_name = db_name)\n yield cache\n os.remove(db_name)\n assert not os.path.exists(db_name)\n\n\ndef test__persistent_cache__add(cache: PersistentCache[Item]):\n # Act\n item = Item(\"key\", \"title\", 1)\n with cache:\n cache.add(item.item_id, item)\n \n # Assert\n with open_cursor() as cursor:\n cursor.execute(\"SELECT * FROM items\")\n actual = cursor.fetchall()\n\n assert len(actual) == 1\n assert actual[0][0] == \"key\"\n\n\ndef test__persistent_cache__count(cache: PersistentCache[Item]):\n # Arrange\n item = Item(\"key\", \"value_1\", 1)\n with cache:\n cache.add(item.item_id, item)\n item.title = \"value_2\"\n with cache:\n cache.add(item.item_id, item)\n\n # Act\n with cache:\n actual = cache.count(\"key\")\n\n # Assert\n assert actual == 2\n\n\ndef test__persistent_cache__count_empty(cache: PersistentCache[Item]):\n # Act\n with cache:\n actual = cache.count(\"key\")\n\n # Assert\n assert actual == 0\n\n\ndef test__persistent_cache__head(cache: PersistentCache[Item]):\n # Arrange\n item = Item(\"key\", \"value_1\", 1)\n with cache:\n cache.add(item.item_id, item)\n item.title = \"value_2\"\n with cache:\n cache.add(item.item_id, item)\n\n # Act\n with cache:\n actual = cache.head(\"key\")\n\n # Assert\n assert isinstance(actual, Item)\n assert actual.item_id == \"key\"\n assert actual.title == \"value_2\"\n\n\ndef test__persistent_cache__head_empty(cache: PersistentCache[Item]):\n with pytest.raises(Exception):\n with cache:\n cache.head(\"key\")\n\n\ndef test__persistent_cache__list(cache: PersistentCache[Item]):\n # Arrange\n item = Item(\"key\", \"value_1\", 1)\n with cache:\n cache.add(item.item_id, item)\n item.title = \"value_2\"\n with cache:\n cache.add(item.item_id, item)\n\n # Act\n with cache:\n actual = list(cache.list())\n\n # Assert\n assert len(actual) == 2\n assert set([item.title for item in actual]) == {\"value_1\", \"value_2\"}\n","repo_name":"python-spokane/journey-to-the-pythonic-peak","sub_path":"tests/cache/persistent_cache_test.py","file_name":"persistent_cache_test.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30823622590","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 7 14:15:21 2016\n\n@author: pikashoes\n\"\"\"\nimport random\n\ncountries = ['China', 'South Korea', 'United States', 'Brazil', 'Argentina', 'Russia',\n 'France', 'Denmark', 'Philippines', 'Taiwan', 'Norway', 'Australia',\n 'Mexico', 'Canada', 'Germany', 'Vietnam', 'Poland', 'Hong Kong', 'Spain', 'Sweden',\n 'Netherlands', 'United Kingdom', 'Singapore', 'Turkey', 'Finland', 'Lithuania', 'Estonia',\n 'Belgium', 'Thailand', 'Armenia', 'Ukraine', 'Greece', 'Chile', 'Czech Republic', 'Japan',\n 'Hungary', 'Uruguay', 'Peru', 'Israel', 'Colombia', 'Portugal', 'Kazakhstan', 'Indoensia',\n 'Afghanistan', 'Croatia', 'South Africa', 'Costa Rica', 'Iraq', 'Slovenia', 'Serbia',\n 'Panama', 'Macao', 'Italy', 'Ecuador', 'Malta', 'Iceland', 'Luxembourg']\n\n\ndef main():\n final_file = \"\"\n teamlist = []\n with open('PlayerBITeam.txt', 'r') as file:\n for line in file:\n random_country = random.randint(0, 56)\n line = line.replace(\"\\n\", \"\")\n new_line = line.split('|')\n if new_line[1] not in teamlist:\n final_file += new_line[1] + \"|\" + countries[random_country] + \"\\n\"\n teamlist.append(new_line[1])\n \n a = open('CurrentTeam.txt', 'w')\n a.write(final_file)\n a.close()\n\nmain()","repo_name":"pikashoes/mpcs-databases","sub_path":"Python/CurrentTeam.py","file_name":"CurrentTeam.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"71223059130","text":"def solution(clothes):\n dic = dict()\n for cloth in clothes: #초기화\n dic[cloth[1]] = 0\n \n for cloth in clothes:\n dic[cloth[1]] += 1\n print(dic)\n \n \n \n ls = list(dic.values())\n print(ls)\n \n answer = 1\n for num in ls:\n answer *= (num+1)\n return answer-1\n\n'''\n1. (headgear의 수 + 1) 1을 더 해주는 이유는 headgear를 착용하지 않을 수도 있기 때문입니다.\n2. (eyewear의 수 + 1 ) 1을 더 해주는 이유는 eyewear를 착용하지 않을 수도 있기 때문입니다.\n3. 두 수는 각각 독립적이기 때문에 1번 2번의 수를 곱하고 - 1 (모두 안입는 경우는 존재하지 않으므로)\n'''\n","repo_name":"confettimimy/Python-for-coding-test","sub_path":"• 프로그래머스/해시/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"15471964677","text":"# -*- coding: UTF-8 -*-\n\n__author__ = 'Bruce Frank Wong'\n\n\nfrom typing import List, Optional, Union\nimport csv\nfrom pathlib import Path\nfrom enum import Enum\nimport datetime as dt\nfrom contextlib import closing\n\nimport pandas as pd\nfrom tqsdk import TqApi, TqAuth, TqSim\nfrom tqsdk.tools import DataDownloader\n\nfrom ...utility import CONFIGS, PACKAGE_PATH\n\n\nclass Symbol:\n def __init__(self, exchange: str, product: str, delivery: str):\n self.exchange = exchange\n self.product = product\n self.delivery = delivery\n\n\nclass Period(Enum):\n Tick = 'Tick'\n Second = 'Second'\n Minute = 'Minute'\n Hour = 'Hour'\n Day = 'Day'\n Week = 'Week'\n Month = 'Month'\n Year = 'Year'\n\n def to_second(self) -> int:\n if self.value == 'Tick':\n return 0\n elif self.value == 'Second':\n return 1\n elif self.value == 'Minute':\n return 60\n elif self.value == 'Hour':\n return 60 * 60\n elif self.value == 'Day':\n return 60 * 60 * 24\n elif self.value == 'Week':\n return 60 * 60 * 24 * 5\n elif self.value == 'Month':\n return 60 * 60 * 24 * 5 * 4\n elif self.value == 'Year':\n return 60 * 60 * 24 * 5 * 4 * 12\n\n def to_english(self) -> str:\n if self.value == 'Tick':\n return 'Tick'\n elif self.value == 'Second':\n return 'Second'\n elif self.value == 'Minute':\n return 'Minute'\n elif self.value == 'Hour':\n return 'Hour'\n elif self.value == 'Day':\n return 'Day'\n elif self.value == 'Week':\n return 'Week'\n elif self.value == 'Month':\n return 'Month'\n elif self.value == 'Year':\n return 'Year'\n\n def to_chinese(self) -> str:\n if self.value == 'Tick':\n return 'Tick'\n elif self.value == 'Second':\n return '秒'\n elif self.value == 'Minute':\n return '分钟'\n elif self.value == 'Hour':\n return '小时'\n elif self.value == 'Day':\n return '日'\n elif self.value == 'Week':\n return '周'\n elif self.value == 'Month':\n return '月'\n elif self.value == 'Year':\n return '年'\n\n def __str__(self, chinese: bool = False):\n if chinese:\n return self.to_chinese()\n else:\n return self.to_english()\n\n\nclass DownloadRequest:\n symbol: str\n start: Union[dt.datetime, dt.date]\n end: Union[dt.datetime, dt.date]\n period: Period\n\n def __init__(self,\n symbol: str,\n period: Period,\n start: Union[dt.datetime, dt.date],\n end: Optional[Union[dt.datetime, dt.date]] = None\n ):\n self.symbol = symbol\n self.period = period\n self.start = start\n if end:\n if isinstance(end, dt.date):\n self.end = end if end < dt.date.today() else dt.date.today()\n else:\n self.end = end if end < dt.datetime.now() else dt.datetime.now()\n else:\n if isinstance(start, dt.date):\n self.end = dt.date.today()\n else:\n self.end = dt.datetime.now()\n\n\ndef tq_download(download_request_list: List[DownloadRequest]):\n # TqSDK api.\n tq_api: TqApi = TqApi(\n auth=TqAuth(\n CONFIGS['TQ']['account'],\n CONFIGS['TQ']['password']\n )\n )\n\n # Download path, make sure it existed.\n download_path: Path = PACKAGE_PATH.joinpath('data_downloaded')\n if not download_path.exists():\n download_path.mkdir()\n\n # csv header.\n bar_column_list: List[str] = [\n 'open', 'high', 'low', 'close', 'volume', 'open_oi', 'close_oi'\n ]\n tick_column_list: List[str] = [\n 'last_price', 'highest', 'lowest',\n 'bid_price1', 'bid_volume1', 'ask_price1', 'ask_volume1',\n 'volume', 'amount', 'open_interest'\n ]\n\n # Do the download.\n task_name: str\n file_path: Path\n task: DataDownloader\n with closing(tq_api):\n download_request: DownloadRequest\n for download_request in download_request_list:\n task_name = 
download_request.symbol\n file_path = download_path.joinpath(\n f'{download_request.symbol}_{download_request.period.to_english()}.csv'\n )\n task = DataDownloader(\n tq_api,\n symbol_list=download_request.symbol,\n dur_sec=download_request.period.to_second(),\n start_dt=download_request.start,\n end_dt=download_request.end,\n csv_file_name=str(file_path)\n )\n\n while not task.is_finished():\n tq_api.wait_update()\n print(\n f'Downloading {download_request.period.to_english()} data for [{task_name}], '\n f'progress: {task.get_progress():>7.3f}%.'\n )\n\n # Fix up the header of the downloaded csv file, i.e. the pandas.DataFrame columns.\n if task.is_finished():\n df = pd.read_csv(file_path)\n if download_request.period == Period.Tick:\n column_list = tick_column_list\n else:\n column_list = bar_column_list\n for column in column_list:\n column_x = ''.join([download_request.symbol, '.', column])\n if column_x in df.columns:\n df.rename(columns={column_x: column}, inplace=True)\n df.to_csv(file_path, index=False)\n","repo_name":"BruceFrankWong/Research","sub_path":"InvestmentResearch/collector/tq/tq.py","file_name":"tq.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"17464897023","text":"from ax.core.parameter import (\n ChoiceParameter,\n FixedParameter,\n ParameterType,\n RangeParameter,\n)\nfrom ax.core.parameter_constraint import (\n ComparisonOp,\n OrderConstraint,\n ParameterConstraint,\n SumConstraint,\n)\nfrom ax.utils.common.testutils import TestCase\n\n\nclass ParameterConstraintTest(TestCase):\n def setUp(self) -> None:\n self.constraint = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -3.0}, bound=6.0\n )\n self.constraint_repr = \"ParameterConstraint(2.0*x + -3.0*y <= 6.0)\"\n\n def test_Eq(self) -> None:\n constraint1 = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -3.0}, bound=6.0\n )\n constraint2 = ParameterConstraint(\n constraint_dict={\"y\": -3.0, \"x\": 2.0}, bound=6.0\n )\n self.assertEqual(constraint1, constraint2)\n\n constraint3 = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -5.0}, bound=6.0\n )\n self.assertNotEqual(constraint1, constraint3)\n\n def test_Properties(self) -> None:\n self.assertEqual(self.constraint.constraint_dict[\"x\"], 2.0)\n self.assertEqual(self.constraint.bound, 6.0)\n\n def test_Repr(self) -> None:\n self.assertEqual(str(self.constraint), self.constraint_repr)\n\n def test_Validate(self) -> None:\n parameters = {\"x\": 4, \"z\": 3}\n with self.assertRaises(ValueError):\n # pyre-fixme[6]: For 1st param expected `Dict[str, Union[float, int]]`\n # but got `Dict[str, int]`.\n self.constraint.check(parameters)\n\n # check slack constraint\n parameters = {\"x\": 4, \"y\": 1}\n # pyre-fixme[6]: For 1st param expected `Dict[str, Union[float, int]]` but\n # got `Dict[str, int]`.\n self.assertTrue(self.constraint.check(parameters))\n\n # check tight constraint (within numerical tolerance)\n parameters = {\"x\": 4, \"y\": (2 - 0.5e-8) / 3}\n self.assertTrue(self.constraint.check(parameters))\n\n # check violated constraint\n parameters = {\"x\": 4, \"y\": (2 - 0.5e-6) / 3}\n self.assertFalse(self.constraint.check(parameters))\n\n def test_Clone(self) -> None:\n constraint_clone = self.constraint.clone()\n self.assertEqual(self.constraint.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint.bound, constraint_clone.bound)\n\n def test_CloneWithTransformedParameters(self) -> None:\n constraint_clone = self.constraint.clone_with_transformed_parameters(\n transformed_parameters={}\n )\n self.assertEqual(self.constraint.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint.bound, constraint_clone.bound)\n\n def test_Sortable(self) -> None:\n constraint1 = ParameterConstraint(\n constraint_dict={\"x\": 2.0, \"y\": -3.0}, bound=1.0\n )\n constraint2 = ParameterConstraint(\n constraint_dict={\"y\": -3.0, \"x\": 2.0}, bound=6.0\n )\n self.assertTrue(constraint1 < constraint2)\n\n\nclass OrderConstraintTest(TestCase):\n def setUp(self) -> None:\n self.x = RangeParameter(\"x\", ParameterType.INT, lower=0, upper=1)\n self.y = RangeParameter(\"y\", ParameterType.INT, lower=0, upper=1)\n self.constraint = OrderConstraint(\n lower_parameter=self.x, upper_parameter=self.y\n )\n self.constraint_repr = \"OrderConstraint(x <= y)\"\n\n def test_Properties(self) -> None:\n self.assertEqual(self.constraint.lower_parameter.name, \"x\")\n self.assertEqual(self.constraint.upper_parameter.name, \"y\")\n\n def test_Repr(self) -> None:\n self.assertEqual(str(self.constraint), self.constraint_repr)\n\n def test_Validate(self) -> None:\n self.assertTrue(self.constraint.check({\"x\": 0, \"y\": 
1}))\n self.assertTrue(self.constraint.check({\"x\": 1, \"y\": 1}))\n self.assertFalse(self.constraint.check({\"x\": 1, \"y\": 0}))\n\n def test_Clone(self) -> None:\n constraint_clone = self.constraint.clone()\n self.assertEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n constraint_clone._lower_parameter = self.y\n self.assertNotEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n def test_CloneWithTransformedParameters(self) -> None:\n constraint_clone = self.constraint.clone_with_transformed_parameters(\n transformed_parameters={p.name: p for p in self.constraint.parameters}\n )\n self.assertEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n constraint_clone._lower_parameter = self.y\n self.assertNotEqual(\n self.constraint.lower_parameter, constraint_clone.lower_parameter\n )\n\n def test_InvalidSetup(self) -> None:\n z = FixedParameter(\"z\", ParameterType.INT, 0)\n with self.assertRaises(ValueError):\n self.constraint = OrderConstraint(lower_parameter=self.x, upper_parameter=z)\n\n z = ChoiceParameter(\"z\", ParameterType.STRING, [\"a\", \"b\", \"c\"])\n with self.assertRaises(ValueError):\n self.constraint = OrderConstraint(lower_parameter=self.x, upper_parameter=z)\n\n\nclass SumConstraintTest(TestCase):\n def setUp(self) -> None:\n self.x = RangeParameter(\"x\", ParameterType.INT, lower=-5, upper=5)\n self.y = RangeParameter(\"y\", ParameterType.INT, lower=-5, upper=5)\n self.constraint1 = SumConstraint(\n parameters=[self.x, self.y], is_upper_bound=True, bound=5\n )\n self.constraint2 = SumConstraint(\n parameters=[self.x, self.y], is_upper_bound=False, bound=-5\n )\n\n self.constraint_repr1 = \"SumConstraint(x + y <= 5.0)\"\n self.constraint_repr2 = \"SumConstraint(x + y >= -5.0)\"\n\n def test_BadConstruct(self) -> None:\n with self.assertRaises(ValueError):\n SumConstraint(parameters=[self.x, self.x], is_upper_bound=False, bound=-5.0)\n z = ChoiceParameter(\"z\", ParameterType.STRING, [\"a\", \"b\", \"c\"])\n with self.assertRaises(ValueError):\n # pyre-fixme[16]: `SumConstraintTest` has no attribute `constraint`.\n self.constraint = SumConstraint(\n parameters=[self.x, z], is_upper_bound=False, bound=-5.0\n )\n\n def test_Properties(self) -> None:\n self.assertEqual(self.constraint1.op, ComparisonOp.LEQ)\n self.assertTrue(self.constraint1._is_upper_bound)\n\n self.assertEqual(self.constraint2.op, ComparisonOp.GEQ)\n self.assertFalse(self.constraint2._is_upper_bound)\n\n def test_Repr(self) -> None:\n self.assertEqual(str(self.constraint1), self.constraint_repr1)\n self.assertEqual(str(self.constraint2), self.constraint_repr2)\n\n def test_Validate(self) -> None:\n self.assertTrue(self.constraint1.check({\"x\": 1, \"y\": 4}))\n self.assertTrue(self.constraint1.check({\"x\": 4, \"y\": 1}))\n self.assertFalse(self.constraint1.check({\"x\": 1, \"y\": 5}))\n\n self.assertTrue(self.constraint2.check({\"x\": -4, \"y\": -1}))\n self.assertTrue(self.constraint2.check({\"x\": -1, \"y\": -4}))\n self.assertFalse(self.constraint2.check({\"x\": -5, \"y\": -1}))\n\n def test_Clone(self) -> None:\n constraint_clone = self.constraint1.clone()\n self.assertEqual(self.constraint1.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint1.bound, constraint_clone.bound)\n\n constraint_clone_2 = self.constraint2.clone()\n self.assertEqual(self.constraint2.bound, constraint_clone_2.bound)\n\n def test_CloneWithTransformedParameters(self) -> None:\n 
constraint_clone = self.constraint1.clone_with_transformed_parameters(\n transformed_parameters={p.name: p for p in self.constraint1.parameters}\n )\n self.assertEqual(self.constraint1.bound, constraint_clone.bound)\n\n constraint_clone._bound = 7.0\n self.assertNotEqual(self.constraint1.bound, constraint_clone.bound)\n","repo_name":"facebook/Ax","sub_path":"ax/core/tests/test_parameter_constraint.py","file_name":"test_parameter_constraint.py","file_ext":"py","file_size_in_byte":8161,"program_lang":"python","lang":"en","doc_type":"code","stars":2182,"dataset":"github-code","pt":"77"}
+{"seq_id":"41415231281","text":"from os import mkdir, remove, rmdir, listdir\r\nfrom tkinter import Tk, Toplevel, ttk, Label, Entry, Button\r\nfrom tkinter.filedialog import asksaveasfilename\r\nfrom subprocess import run, call\r\nfrom os import startfile, getcwd\r\nfrom geopy.geocoders import Nominatim\r\nfrom typing import Union\r\n\r\nclass Snapchat:\r\n\r\n def __init__(self, parent: Union[Tk, Toplevel], long: str = None, lat: str = None, city: str = None):\r\n self.parent = parent\r\n self.long = long\r\n self.lat = lat\r\n self.city = city\r\n self.parent.iconify()\r\n try:\r\n mkdir(\"temp\")\r\n except FileExistsError:\r\n print(\"Folder already exists, possible messy exit previously\")\r\n self.snapwindow = self.genWindow()\r\n self.genWidgets()\r\n\r\n # Generates the window\r\n def genWindow(self) -> Toplevel:\r\n snapwindow = Toplevel(self.parent)\r\n snapwindow.title(\"Snapchat\")\r\n snapwindow.resizable(False, False)\r\n snapwindow.geometry(\"+%d+%d\" % (self.parent.winfo_x(), self.parent.winfo_y()))\r\n snapwindow.protocol(\"WM_DELETE_WINDOW\", lambda: self.closewin())\r\n return snapwindow\r\n\r\n # Generates the widgets to be displayed on the window\r\n def genWidgets(self) -> None:\r\n # Labels\r\n latLabel = Label(self.snapwindow, text=\"Latitude: \")\r\n longLabel = Label(self.snapwindow, text=\"Longitude: \")\r\n cityLabel = Label(self.snapwindow, text=\"City: \")\r\n radiusLabel = Label(self.snapwindow, text=\"Radius (meters): \")\r\n\r\n # Entries\r\n latEntry = Entry(self.snapwindow, width=20)\r\n longEntry = Entry(self.snapwindow, width=20)\r\n cityEntry = Entry(self.snapwindow, width=20)\r\n radiusEntry = Entry(self.snapwindow, width=10)\r\n\r\n radiusEntry.insert(0, \"10000\")\r\n\r\n # Button\r\n SearchButton = Button(self.snapwindow, text=\"Search\", command=lambda: self.verifyInput(latEntry.get(), longEntry.get(), cityEntry.get(), radiusEntry.get()))\r\n SaveButton = Button(self.snapwindow, text=\"Save\", command=lambda: self.saveFile())\r\n\r\n # Autofill\r\n if self.long:\r\n longEntry.insert(0, self.long)\r\n if self.lat:\r\n latEntry.insert(0, self.lat)\r\n if self.city:\r\n cityEntry.insert(0, self.city)\r\n\r\n # Treeview\r\n self.details = ttk.Treeview(self.snapwindow, show=\"headings\", height=\"6\")\r\n\r\n self.details['columns'] = (\"Name\", \"Type\")\r\n self.details.column(\"#0\", width=0)\r\n self.details.column(\"Name\", width=350)\r\n self.details.column(\"Type\", width=150, minwidth=80)\r\n\r\n self.details.heading(\"#0\", text=\"\")\r\n self.details.heading(\"Name\", text=\"Name\")\r\n self.details.heading(\"Type\", text=\"Type\")\r\n\r\n scroll = ttk.Scrollbar(self.snapwindow, orient=\"vertical\", command=self.details.yview)\r\n self.details.config(yscrollcommand=scroll.set)\r\n self.details.bind(\"\", self.openFile)\r\n\r\n # Grid Layout\r\n latLabel.grid(row=0, column=0, sticky=\"E\")\r\n latEntry.grid(row=0, column=1, padx=(0, 5))\r\n longLabel.grid(row=0, column=2, sticky=\"E\")\r\n longEntry.grid(row=0, column=3, padx=(0, 5))\r\n radiusLabel.grid(row=1, column=2, sticky=\"E\")\r\n radiusEntry.grid(row=1, column=3, padx=(0, 5))\r\n cityLabel.grid(row=1, column=0, sticky=\"E\")\r\n cityEntry.grid(row=1, column=1, padx=(0, 5))\r\n\r\n SearchButton.grid(row=2, column=2, columnspan=1, sticky=\"NSEW\", pady=10)\r\n SaveButton.grid(row=4, column=2, pady=(0, 10))\r\n\r\n self.details.grid(row=3, column=0, columnspan=4, padx=(5, 0), pady=5)\r\n scroll.grid(row=3, column=4, padx=(0, 5), sticky=\"NS\")\r\n\r\n # Opens the images displayed in the 
treeview when double clicked\r\n def openFile(self, event) -> None:\r\n filename = getcwd() + \"/temp/\" + self.details.item(self.details.focus())['values'][0]\r\n try:\r\n startfile(filename)\r\n except AttributeError:\r\n call(['open', filename])\r\n\r\n # Saves the file to a directory of their choice from the temp folder\r\n def saveFile(self) -> None:\r\n if self.details.item(self.details.focus())['values'][1] == \"Image\":\r\n file = asksaveasfilename(initialfile=self.details.item(self.details.focus())['values'][0].rsplit(\".\", 1)[0], filetypes=[(\"JPG image\", \"*.jpg\")]) + \".jpg\"\r\n else:\r\n file = asksaveasfilename(initialfile=self.details.item(self.details.focus())['values'][0].rsplit(\".\", 1)[0], filetypes=[(\"MPEG video\", \"*.mp4\")]) + \".mp4\"\r\n if len(file) > 4:\r\n with open(file, \"wb+\") as f:\r\n with open(getcwd() + \"/temp/\" + self.details.item(self.details.focus())['values'][0], \"rb\") as f2:\r\n f.write(f2.read())\r\n\r\n # Checks to see if the range is an int, and gets the lat, long of the city if chosen\r\n def verifyInput(self, lat: str, long: str, city: str, range: str) -> None:\r\n try:\r\n rangeInt = int(range)\r\n except ValueError:\r\n print(\"Error, radius isn't a number. Resorting to default!\")\r\n rangeInt = 10000\r\n if lat == \"\":\r\n geo = Nominatim(user_agent=\"CTI Toolkit\")\r\n location = geo.geocode(city)\r\n lat = location.latitude\r\n long = location.longitude\r\n\r\n coords: str = str(lat) + \",\" + str(long)\r\n\r\n self.searchSnap(coords, rangeInt)\r\n\r\n # Clears the temp folder, and then runs the snapmap_archiver python module\r\n def searchSnap(self, coords: str, range: int) -> None:\r\n # Cleanup\r\n self.details.delete(*self.details.get_children())\r\n for files in listdir(\"temp\"):\r\n remove(\"temp/\" + files)\r\n\r\n # Runs the command (Will freeze the program)\r\n run('python -m snapmap_archiver -o temp -l=\\\"{}\\\" -r {}'.format(coords, range))\r\n\r\n # Lists the output\r\n for files in listdir(\"temp\"):\r\n if \"mp4\" in files.rsplit(\".\")[1]:\r\n self.details.insert(\"\", 'end', values=(files, \"Video\"))\r\n else:\r\n self.details.insert(\"\", 'end', values=(files, \"Image\"))\r\n\r\n # Closes the window\r\n def closewin(self) -> None:\r\n for file in listdir(\"temp\"):\r\n remove(\"temp/\" + file)\r\n rmdir(\"temp\")\r\n self.parent.deiconify()\r\n self.snapwindow.destroy()\r\n","repo_name":"AlanTheBlank/CTI-Toolkit","sub_path":"snapchat.py","file_name":"snapchat.py","file_ext":"py","file_size_in_byte":6397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"34264409656","text":"# -*- coding: utf-8 -*-\n#tfidf(21)\n\n# -*- coding: utf-8 -*-\n\nimport jieba.posseg as pseg\nimport pandas as pd\nfrom gensim import models, similarities,corpora\nimport codecs\n\n# 构建停用词表\nstop_words = './stop_words.txt'\nstopwords = codecs.open(stop_words,'r',encoding='utf8').readlines()\nstopwords = [ w.strip() for w in stopwords ]\nstop_flag = ['x', 'c', 'u','d', 'p', 't', 'uj', 'm', 'f', 'r']\ndef tokenization(title):\n result = []\n words = pseg.cut(title)\n for word, flag in words:\n if flag not in stop_flag and word not in stopwords:\n result.append(word)\n return result\n\ndef train_text():\n # 训练文本数据\n all_doc = []\n datas = pd.read_csv(\"train_data.csv\")\n titles = datas['title']\n for title in titles:\n all_doc.append(title)\n\n\n # 对目标文档进行分词\n print(\"对目标文档进行分词\")\n all_doc_list = []\n for doc in all_doc:\n doc_list = tokenization(doc)\n all_doc_list.append(doc_list)\n\n\n\n # 测试文档数据\n print(\"测试文档数据\")\n test_doc = []\n test_datas = pd.read_csv(\"test_data.csv\", encoding=\"gbk\")\n test_titles = test_datas[\"title\"]\n for title in test_titles:\n test_doc.append(title)\n\n\n\n # 测试文档进行分词\n test_doc_list = []\n for doc in test_doc:\n doc_list = tokenization(doc)\n test_doc_list.append(doc_list)\n\n # 制作语料库\n print(\"制作语料库\")\n\n dictionary = corpora.Dictionary(all_doc_list)\n dictionary.keys()\n dictionary.token2id\n corpus = [dictionary.doc2bow(doc) for doc in all_doc_list] #(0,1)(1,1)\n tfidf = models.TfidfModel(corpus)\n results = []\n for doc_test_list in test_doc_list:\n doc_test_vec = dictionary.doc2bow(doc_test_list)\n index = similarities.SparseMatrixSimilarity(tfidf[corpus], num_features=len(dictionary.keys()))\n sim = index[tfidf[doc_test_vec]]\n similiar_sorted = sorted(enumerate(sim), key=lambda item: -item[1])[:21]\n print(similiar_sorted)\n indexs = [str(item[0]+1) for item in similiar_sorted]\n print(indexs)\n results.append(\" \".join(indexs))\n\n with open(\"results.txt\", \"w\") as f:\n for item in results:\n item = item.strip().split()\n for i in range(0, 21):\n f.write(item[0] + \"\\t\" + item[i] + \"\\n\")\n\nif __name__ == \"__main__\":\n train_text()\n with open(\"results.txt\", \"r\") as f, open(\"submisson2.txt\", \"w\") as wf:\n wf.write(\"source_id\" + \"\\t\" + \"target_id\" + \"\\n\")\n datas = f.readlines()\n for data in datas:\n data = data.strip().split(\"\\t\")\n wf.write(data[0] + \"\\t\" + data[1] + \"\\n\")\n\n\n\n","repo_name":"WangYanZ/NewsRecommendBaseline","sub_path":"baseline_2.py","file_name":"baseline_2.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"18775026506","text":"def fib():\n count = int(input(\"Enter the number fib you want to print out : \"))\n print(\"You have enter: \", count)\n i = 1\n if count == 0:\n fib = [0]\n elif count == 1:\n fib = [1]\n elif count == 2:\n fib = [1,1]\n elif count > 2:\n fib = [1,1]\n while i < count - 1:\n\n fib.append((fib[i] + fib[i-1]))\n i += 1\n return fib\nprint(\"your fibonacci number is:\", fib())","repo_name":"Peuapeu2019/Selftought","sub_path":"Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39112841452","text":"from tracks.models import Track\nfrom django.shortcuts import get_object_or_404\nfrom django.core.exceptions import PermissionDenied\n\nclass TrackMixin(object):\n '''\n Load a track and check user\n can access it\n '''\n model = Track\n context_object_name = 'track'\n\n def get_object(self, check_ownership=False):\n # Load requested track\n self.track = get_object_or_404(Track, pk=self.kwargs['track_id'])\n\n # Check right access to tracks\n track_user = self.track.session.day.week.user\n if check_ownership and track_user != self.request.user:\n raise PermissionDenied\n if 'tracks' not in track_user.get_privacy_rights(self.request.user):\n raise PermissionDenied\n\n return self.track\n","repo_name":"La0/runreport","sub_path":"src/tracks/views/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"}
+{"seq_id":"12236653327","text":"#!/usr/bin/python\n###\n# 11B.\n# [chrosta@toolbox:~/Github/advent-of-code@chrosta/advent-of-code/2022]$ cat ./data/11.text | ./11B.py\n###\nimport re, ast, sys\nfrom copy import deepcopy as dc\nfrom itertools import groupby\nfrom functools import reduce\n\n###\n# ???\n###\nALL_DIVISORS_PRODUCT = 1\n\nclass Monkey:\n def __init__(self, number, items, oper, test):\n global ALL_DIVISORS_PRODUCT\n self.__number = number\n self.__items = items\n self.__oper = oper\n self.__test = test \n self.__monkeys = []\n self.__count = 0\n ALL_DIVISORS_PRODUCT *= self.__test\n\n def __repr__(self):\n return str([self.__number, self.__items, self.__test, self.__monkeys[0].number(), self.__monkeys[1].number(), self.__count])\n\n def __str__(self):\n return self.__repr__()\n \n def count(self):\n return self.__count\n\n def number(self):\n return self.__number\n\n def turn(self):\n while len(self.__items) > 0:\n self.__count += 1\n old = self.__items.pop(0)\n new = eval(self.__oper.split(\" = \")[1])\n #--\n new = new % ALL_DIVISORS_PRODUCT\n #--\n if new % self.__test == 0:\n self.__monkeys[0].append_throwed_item(new)\n else:\n self.__monkeys[1].append_throwed_item(new)\n \n def append_throwed_item(self, i):\n self.__items.append(i)\n\n def bind_to_monkeys(self, m):\n self.__monkeys = m\n\ndata = {}\nlines = [l.strip().split(':') for l in sys.stdin.readlines()]\nfor l in lines:\n if \"Monkey\" in l[0]:\n number = int(l[0].split(' ')[1])\n continue\n if \"Starting items\" in l[0]:\n items = [int(i.strip()) for i in l[1].split(\", \")]\n continue\n if \"Operation\" in l[0]:\n oper = l[1].strip()\n continue\n if \"Test\" in l[0]:\n test = int(l[1].split(' ')[3])\n continue\n if \"If true\" in l[0]:\n t = int(l[1].split(' ')[4])\n continue\n if \"If false\" in l[0]:\n f = int(l[1].split(' ')[4])\n continue\n if len(l[0]) == 0:\n data[number] = [items, oper, test, t, f]\n\n[print(\"I:\", k, d[0]) for k, d in data.items()]\nprint(\"---[\", 0, \"]---\")\nmonkeys = [Monkey(k, d[0], d[1], d[2]) for k, d in data.items()]\nfor m in monkeys:\n b = [n for n in data[m.number()][3:5]]\n m.bind_to_monkeys([monkeys[b[0]], monkeys[b[1]]])\n print(\"A:\", m)\n\nfor r in range(0, 10000):\n print(\"---[\", r + 1, \"]---\")\n for m in monkeys:\n print(\"A:\", m)\n m.turn()\n print(\"B:\", m)\n\nprint(\"---[ R ]---\")\nr = [m.count() for m in monkeys]\nr.sort()\nr = reduce(lambda x, y: x*y, r[-2:])\nprint(r)\n###\n# 25712998901\n###\n","repo_name":"chrosta/advent-of-code","sub_path":"2022/11B.py","file_name":"11B.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"72227828730","text":"#!/usr/bin/python\n# -*- encoding: utf-8 -*-\n\nimport sys\nsys.path.insert(0, '.')\nimport os\nimport os.path as osp\nimport random\nimport logging\nimport time\nimport argparse\nimport numpy as np\nfrom tabulate import tabulate\n\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.utils.data import DataLoader\n\nfrom networks import model_factory\nfrom configs import cfg_factory\nfrom dataload.rexroth_cv2 import get_data_loader\nfrom evaluate.evaluate import evaluate\nfrom ohem_ce_loss import OhemCELoss\nfrom lr_scheduler import WarmupPolyLrScheduler\nfrom utils.meters import TimeMeter, AvgMeter\nfrom utils.logger import setup_logger, print_log_msg, print_log_msg_withoutaux\nfrom torch.utils.tensorboard import SummaryWriter\n\n# apex\nhas_apex = True\ntry:\n from apex import amp, parallel\nexcept ImportError:\n has_apex = False\n\n\n## fix all random seeds\ntorch.manual_seed(123)\ntorch.cuda.manual_seed(123)\nnp.random.seed(123)\nrandom.seed(123)\ntorch.backends.cudnn.deterministic = True\n# torch.backends.cudnn.benchmark = True\n# torch.multiprocessing.set_sharing_strategy('file_system')\n\n\ndef parse_args():\n parse = argparse.ArgumentParser()\n parse.add_argument('--local_rank', dest='local_rank', type=int, default=-1,)\n parse.add_argument('--port', dest='port', type=int, default=44554,)\n parse.add_argument('--model', dest='model', type=str, default='bisenetv1')\n parse.add_argument('--finetune-from', type=str, default=None,)\n return parse.parse_args()\n\nargs = parse_args()\ncfg = cfg_factory[args.model]\nwriter = SummaryWriter(log_dir=cfg.logpath)\n\ndef set_model():\n net = model_factory[cfg.model_type](n_classes=cfg.categories, aux_output=cfg.aux_output, export=False)\n if not args.finetune_from is None:\n net.load_state_dict(torch.load(args.finetune_from, map_location='cpu'))\n if cfg.use_sync_bn: net = set_syncbn(net)\n net.cuda()\n net.train()\n criteria_pre = OhemCELoss(0.7)\n if cfg.aux_output:\n criteria_aux = [OhemCELoss(0.7) for _ in range(cfg.num_aux_heads)]\n return net, criteria_pre, criteria_aux\n else:\n return net, criteria_pre\n\ndef set_syncbn(net):\n if has_apex:\n net = parallel.convert_syncbn_model(net)\n else:\n net = nn.SyncBatchNorm.convert_sync_batchnorm(net)\n return net\n\n\ndef set_optimizer(model):\n if hasattr(model, 'get_params'):\n wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = model.get_params()\n params_list = [\n {'params': wd_params, },\n {'params': nowd_params, 'weight_decay': 0},\n {'params': lr_mul_wd_params, 'lr': cfg.lr_start * cfg.lr_multiplier},\n {'params': lr_mul_nowd_params, 'weight_decay': 0, 'lr': cfg.lr_start * cfg.lr_multiplier},\n ]\n else:\n wd_params, non_wd_params = [], []\n for name, param in model.named_parameters():\n if param.dim() == 1:\n non_wd_params.append(param)\n elif param.dim() == 2 or param.dim() == 4:\n wd_params.append(param)\n params_list = [\n {'params': wd_params, },\n {'params': non_wd_params, 'weight_decay': 0},\n ]\n optim = torch.optim.SGD(\n params_list,\n lr=cfg.lr_start,\n momentum=0.9,\n weight_decay=cfg.weight_decay,\n )\n return optim\n\n\ndef set_model_dist(net):\n if has_apex:\n net = parallel.DistributedDataParallel(net, delay_allreduce=True, find_unused_parameters=True)\n else:\n local_rank = dist.get_rank()\n net = nn.parallel.DistributedDataParallel(\n net,\n device_ids=[local_rank, ],\n output_device=local_rank, find_unused_parameters=True)\n return net\n\n\ndef set_meters():\n time_meter = 
TimeMeter(cfg.max_iter)\n loss_meter = AvgMeter('loss')\n loss_pre_meter = AvgMeter('loss_prem')\n if cfg.aux_output:\n loss_aux_meters = [AvgMeter('loss_aux{}'.format(i))\n for i in range(cfg.num_aux_heads)]\n return time_meter, loss_meter, loss_pre_meter, loss_aux_meters\n else:\n return time_meter, loss_meter, loss_pre_meter\n\n\ndef train():\n logger = logging.getLogger()\n is_dist = dist.is_initialized()\n\n ## dataset\n dl_train = get_data_loader(\n cfg.im_root, cfg.train_im_anns,\n cfg.ims_per_gpu, cfg.scales, cfg.cropsize,\n cfg.max_iter, mode='train', distributed=is_dist, n_cats=cfg.categories)\n\n ## model\n if cfg.aux_output:\n net, criteria_pre, criteria_aux = set_model()\n else:\n net, criteria_pre = set_model()\n\n ## optimizer\n optim = set_optimizer(net)\n\n ## fp16\n if has_apex:\n opt_level = 'O1' if cfg.use_fp16 else 'O0'\n net, optim = amp.initialize(net, optim, opt_level=opt_level)\n\n ## ddp training\n net = set_model_dist(net)\n\n ## meters\n if cfg.aux_output:\n time_meter, loss_meter, loss_pre_meter, loss_aux_meters = set_meters()\n else:\n time_meter, loss_meter, loss_pre_meter = set_meters()\n ## lr scheduler\n lr_schdr = WarmupPolyLrScheduler(optim, power=0.9,\n max_iter=cfg.max_iter, warmup_iter=cfg.warmup_iters,\n warmup_ratio=0.1, warmup='exp', last_epoch=-1,)\n\n ## train loop\n iteration = 0\n for it, (im, lb) in enumerate(dl_train):\n im = im.cuda()\n lb = lb.cuda()\n\n lb = torch.squeeze(lb, 1)\n\n optim.zero_grad()\n if cfg.aux_output:\n logits, *logits_aux = net(im)\n else:\n logits = net(im)\n loss_pre = criteria_pre(logits, lb)\n loss = loss_pre\n if cfg.aux_output:\n loss_aux = [crit(lgt, lb) for crit, lgt in zip(criteria_aux, logits_aux)]\n loss = loss_pre + sum(loss_aux)\n if has_apex:\n with amp.scale_loss(loss, optim) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optim.step()\n torch.cuda.synchronize()\n lr_schdr.step()\n\n writer.add_scalar(\"loss\", loss_pre, it)\n\n time_meter.update()\n loss_meter.update(loss.item())\n iteration = it\n if cfg.aux_output:\n loss_pre_meter.update(loss_pre.item())\n _ = [mter.update(lss.item()) for mter, lss in zip(loss_aux_meters, loss_aux)]\n\n\n ## print training log message\n if (it + 1) % 100 == 0:\n lr = lr_schdr.get_lr()\n lr = sum(lr) / len(lr)\n writer.add_scalar(\"learning_rate\", lr, it)\n if cfg.aux_output:\n print_log_msg(\n it, cfg.max_iter, lr, time_meter, loss_meter,\n loss_pre_meter, loss_aux_meters)\n else:\n print_log_msg_withoutaux(\n it, cfg.max_iter, lr, time_meter, loss_meter)\n\n ## Save model every 1000 iterations\n if (it + 1) % 1000 == 0:\n save_pth = osp.join(cfg.respth, cfg.save_name)\n logger.info('\\nsave models to {}'.format(save_pth+str(it+1)))\n state = net.module.state_dict()\n if dist.get_rank() == 0: torch.save(state, save_pth+str(it+1))\n writer.add_scalar(\"train_loss_1000\", loss_pre, it)\n\n count = (iteration+1)//1000\n logger.info('\\nevaluating the models')\n classes = [\"Background\", \"Monorail\", \"Person\", \"Forklift\"]\n\n for i in range(count):\n save_pth = osp.join(cfg.respth, cfg.save_name)\n iteration = (i + 1) * 1000\n logger.info('\\n Iteration number:'+str(iteration))\n torch.cuda.empty_cache()\n ious_ss_eval, ious_mssc_eval, ious_mcf_eval, ious_msfc_eval, ious_ss_test, ious_mssc_test, ious_mcf_test, ious_msfc_test = evaluate(cfg, save_pth+str(iteration))\n\n\n for j in range(cfg.categories):\n writer.add_scalar(\"ss_class_iou_eval \"+ classes[j], ious_ss_eval.tolist()[j], iteration)\n writer.add_scalar(\"mssc_class_iou_eval \"+ 
classes[j], ious_mssc_eval.tolist()[j], iteration)\n writer.add_scalar(\"mcf_class_iou_eval \" + classes[j], ious_mcf_eval.tolist()[j], iteration)\n writer.add_scalar(\"msfc_class_iou_eval \" + classes[j], ious_msfc_eval.tolist()[j], iteration)\n writer.add_scalar(\"ss_class_iou_test \" + classes[j], ious_ss_test.tolist()[j], iteration)\n writer.add_scalar(\"mssc_class_iou_test \" + classes[j], ious_mssc_test.tolist()[j], iteration)\n writer.add_scalar(\"mcf_class_iou_test \" + classes[j], ious_mcf_test.tolist()[j], iteration)\n writer.add_scalar(\"msfc_class_iou_test \" + classes[j], ious_msfc_test.tolist()[j], iteration)\n\n return\n\n\ndef main():\n torch.cuda.set_device(args.local_rank)\n dist.init_process_group(\n backend='nccl',\n init_method='tcp://127.0.0.1:{}'.format(args.port),\n world_size=torch.cuda.device_count(),\n rank=args.local_rank\n )\n if not osp.exists(cfg.respth): os.makedirs(cfg.respth)\n setup_logger('{}-train'.format(cfg.model_type), cfg.respth)\n train()\n writer.flush()\n writer.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Eashwar93/Realtime-SemanticSegmentation","sub_path":"train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27050515613","text":"\n\nwith open('KakaoTalk_friend.txt', 'r', encoding='utf-8') as input_file:\n out=open('kakaotalk.txt','w')\n while(1):\n s=input_file.readline()\n if not s:\n break\n s=s.split(']')\n if(len(s)<3):\n continue\n out.write(s[2])\n out.close()\n\n","repo_name":"MubaBot/muba-chatbot","sub_path":"chatbot_api/Markov_response/kakaotalk_extract.py","file_name":"kakaotalk_extract.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"36529413798","text":"import datetime\nfrom functools import cmp_to_key\nfrom datetime import datetime as dt\nfrom AppConfig import AppConfig\n\nif not AppConfig().isTest():\n from BirdNETLite import analyzeAudioData, prepareAudioSignal, parseArgs\nelse:\n from BirdNETLiteMOCK import analyzeAudioData, prepareAudioSignal, parseArgs\n\ndef getWeek():\n dtn = dt.now()\n return datetime.date(dtn.year, dtn.month, dtn.day).isocalendar().week\n\ndef getTimestamp():\n return dt.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef getDate():\n return getTimestamp().split(\" \")[0]\n\ndef getTime():\n return getTimestamp().split(\" \")[1]\n\ndef getNewArgMap(lat, lon, week=None):\n argMap = {\"lat\": 0, \"lon\": 0, \"week\": getWeek(), \"overlap\": 0.0}\n if (lat != None):\n argMap[\"lat\"] = lat\n if (lon != None):\n argMap[\"lon\"] = lon\n if (week != None):\n argMap[\"week\"] = week\n return argMap\n\ndef parseArgMap(argMap):\n \"\"\"\n input args as a map {\"lat\": 1, \"lon\": 2, \"week\": 3, \"overlap\": 0}\n \"\"\"\n argv = []\n for a in argMap:\n argv.append(\"--\" + a)\n argv.append(str(argMap[a]))\n return argv\n\n\ndef detectSpecies(sig=None, rate=None, argMap=None, interpreter=None, callbackProgress=None):\n\n parsedArgMap = parseArgMap(argMap)\n args = parseArgs(parsedArgMap)\n if (interpreter == None):\n assert interpreter != None\n\n audioData = prepareAudioSignal(sig, rate, args.overlap)\n week = max(1, min(args.week, 48))\n sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))\n detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap, interpreter, callbackProgress)\n\n return {\"detections\": detections, \"timestamp\": getTimestamp(), \"lat\": args.lat, \"lon\": args.lon}\n\n\ndef filterDetections(detections_result, p_limit=0.1, lang=\"en\"):\n count_total = 0\n count = 0\n detections = detections_result[\"detections\"]\n filtered_list = []\n for d in detections:\n detection = detections[d]\n for item in detection:\n count_total += 1\n names, p = item\n if (p > p_limit):\n count += 1\n values = names.split(\"_\")\n filtered_list.append([d, p, values[1], values[0]])\n return {\"count_total\": count_total, \n \"p_limit\": p_limit, \n \"count\": count, \n \"filtered_list\": filtered_list, \n \"timestamp\": detections_result[\"timestamp\"],\n \"lat\": detections_result[\"lat\"],\n \"lon\": detections_result[\"lon\"]\n }\n\ndef compare_sum(a,b):\n return a[0]-b[0]\n\ndef rankResult(filtered_result, desc=True):\n filtered_detections = filtered_result[\"filtered_list\"]\n sci_ix = 3\n sci_names = set(map(lambda x: x[sci_ix], filtered_detections))\n rank_list = []\n for sci_name in sci_names:\n detections_species = list(filter(lambda x: x[sci_ix] == sci_name, filtered_detections))\n p = []\n for d in detections_species:\n p.append(d[1])\n rank_list.append([sum(p), p, detections_species[0][2], detections_species[0][3]])\n rank_list.sort(key=cmp_to_key(compare_sum), reverse=desc)\n return {\"ranked_list\": rank_list,\n \"timestamp\": filtered_result[\"timestamp\"], \n \"lat\": filtered_result[\"lat\"], \n \"lon\": filtered_result[\"lon\"]}\n","repo_name":"gorlang/BirdifyApp","sub_path":"src/BirdifyAPI.py","file_name":"BirdifyAPI.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"77"}
+{"seq_id":"1073632152","text":"import ops\nimport re\n\n__atom_pattern = re.compile(r'@'\n r'|'\n r'\\$[A-Za-z0-9_]+'\n r'|'\n r'\\d+[`°]\\d{0,2}\\'?\\d{0,2}\"?\\d*'\n r'|'\n r'\\d+\\.?\\d*'\n r'|'\n r'π'\n r'|'\n r'_?[+\\-*/%!^()\\[\\]{},;~\\:?<>.√×]'\n r'|'\n r'[A-Za-z]+'\n r'|'\n r'\\s+'\n )\n\nvar_format = re.compile(r'@|\\$[A-Za-z0-9]$')\n\nans = '@'\n\n\ndef __try_get(key, *funcs):\n for f in funcs:\n try:\n return f(key)\n except:\n pass\n raise KeyError\n\n\ndef translate(formula_str: str, oplist: ops.OpList)-> list:\n def _get_var(s):\n if re.match(var_format, s) is not None:\n return oplist[s]\n else:\n raise KeyError\n\n origin_list = re.findall(__atom_pattern, formula_str)\n formula = [oplist.head]\n for piece in origin_list:\n if re.match(r'\\s+', piece) is not None:\n pass\n else:\n if oplist.is_number(formula[-1]) or oplist.is_right_bracket(formula[-1]):\n try:\n t = __try_get(piece, oplist.get_right_bracket, oplist.get_binary)\n formula.append(t)\n except KeyError:\n try:\n t = __try_get(piece, oplist.get_left_bracket, oplist.get_const,\n oplist.string_to_real, oplist.get_unary, _get_var)\n formula.append(oplist.connector)\n formula.append(t)\n except KeyError:\n t = oplist.get_unary(oplist.postpos_unary_dict[piece])\n count = 0\n for i in range(len(formula) - 1, -1, -1):\n if oplist.is_right_bracket(formula[i]):\n count += 1\n elif oplist.is_left_bracket(formula[i]):\n count -= 1\n if count == 0:\n formula.insert(i, t)\n break\n else:\n formula.append(__try_get(piece, oplist.get_unary, oplist.get_const, oplist.string_to_real,\n oplist.get_left_bracket, oplist.get_right_bracket, _get_var))\n return formula\n\n","repo_name":"StarvinCulex/scicalc","sub_path":"formtrans.py","file_name":"formtrans.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"24680924249","text":"'''\nThere are cases when different processes needs to access the same resource (properly instantiated).\nRace conditions can occur when two or more processes access a shared piece of data or resource simoultaneously.\n'''\n\nimport time\nfrom multiprocessing import Process, Value\n\nr = 1000000\nbalance = Value('f', 200.00) # instance of shared resource (f:float)\n\n# process 1\ndef deposit(balance):\n for i in range(r):\n balance.value += 1.00 # critical section\n\n# process 2\ndef withdraw(balance):\n for i in range(r):\n balance.value -= 1.00 # critical section\n\n\nif __name__ == '__main__':\n\n # define two processes accesing the shared resource\n d = Process(target=deposit, args=(balance,)) \n w = Process(target=withdraw, args=(balance,))\n\n d.start()\n w.start()\n d.join()\n w.join()\n\n print(balance.value)\n # expected output 200 instead is random at each execution","repo_name":"rpalloni/concurrency","sub_path":"9_race_conditions_process.py","file_name":"9_race_conditions_process.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"26835762083","text":"import base64\nfrom ipywidgets import CallbackDispatcher, Checkbox, CoreWidget, DOMWidget, register, Tab, VBox\nfrom traitlets import Bool, Bytes, Float, Int, List, TraitError, Unicode, validate\n\n\n@register\nclass NlLink(DOMWidget):\n \"\"\"A widget to display links.\n\n\n Parameters\n ----------\n value: str\n text to display for the link.\n href: str\n URL of the link.\n \"\"\"\n\n _view_name = Unicode(\"LinkView\").tag(sync=True)\n _model_name = Unicode('LinkModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n # value to appear as link text\n value = Unicode().tag(sync=True)\n # url of the link\n href = Unicode().tag(sync=True)\n\n # TODO check href to be a link\n\n\n@register\nclass NlProgress(DOMWidget):\n \"\"\"A widget to display progress as a horizontal bar.\n\n Parameters\n ----------\n value: float\n amount of progress.\n max: int\n maximum possible value.\n \"\"\"\n\n _view_name = Unicode(\"ProgressView\").tag(sync=True)\n _model_name = Unicode('ProgressModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n # actual value\n value = Float().tag(sync=True)\n # maximum value\n max = Int().tag(sync=True)\n\n @validate('value')\n def _valid_value(self, proposal):\n if proposal['value'] < 0:\n raise TraitError('Value should be greater than 0.')\n if proposal['value'] > self.max:\n raise TraitError(f\"Value should be less then max value {self.max}\")\n return proposal['value']\n\n\n@register\nclass NlCheckbox(Checkbox):\n \"\"\"A Checkbox widget that changes opacity when disabled.\n\n Parameters\n ----------\n opacity: float\n opacity value for the checkbox when disabled.\n \"\"\"\n\n _view_name = Unicode('NCheckboxView').tag(sync=True)\n _model_name = Unicode('NCheckboxModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n # opacity value to be used when disabled.\n opacity = Float(0.45).tag(sync=True)\n bg_color = Unicode('white').tag(sync=True)\n\n # TODO check opacity value.\n\n\n@register\nclass NlIconTab(Tab):\n _view_name = Unicode('IconTabView').tag(sync=True)\n _model_name = Unicode('IconTabModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n title_icons = List().tag(sync=True)\n\n def reset(self):\n self.unobserve_all()\n self.selected_index = None\n self.title_icons = []\n self._titles = {}\n self.children = []\n\n\n@register\nclass NlVBoxOverlay(VBox):\n \"\"\"A VBox widget that is viewed as overlay.\n\n Parameters\n ----------\n \"\"\"\n\n _view_name = Unicode('VBoxOverlayView').tag(sync=True)\n _model_name = 
Unicode('VBoxOverlayModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n\ndef content_to_json(pydt, manager):\n \"\"\"Serialize file content to json.\n \"\"\"\n if pydt is None:\n return None\n else:\n b64 = base64.encodebytes(pydt).decode()\n return b64\n\n\ncontent_serialization = {\n 'to_json': content_to_json\n}\n\n\n@register\nclass NlDownloadLink(DOMWidget, CoreWidget):\n \"\"\"A widget to download content as file with filename.\n\n\n Parameters\n ----------\n content: str\n content of the file as bytes\n filename: str\n file name\n mimetype: str\n text/csv by default\n description: str\n description for link\n tooltip: str\n tooltip to display when link hovered\n disabled: bool\n boolean value to indicate if the link is disabled\n \"\"\"\n\n _view_name = Unicode(\"DownloadLinkView\").tag(sync=True)\n _model_name = Unicode('DownloadLinkModel').tag(sync=True)\n _view_module = Unicode(\"neurolang-ipywidgets\").tag(sync=True)\n _model_module = Unicode('neurolang-ipywidgets').tag(sync=True)\n\n _view_module_version = Unicode(\"0.1.0\").tag(sync=True)\n _model_module_version = Unicode('^0.1.0').tag(sync=True)\n\n # Widget specific properties\n content = Bytes().tag(sync=True, **content_serialization)\n mimetype = Unicode(\"text/csv\").tag(sync=True)\n filename = Unicode().tag(sync=True)\n description = Unicode().tag(sync=True)\n tooltip = Unicode(\"Download\").tag(sync=True)\n disabled = Bool(False).tag(sync=True)\n\n # below lines are copied from button widget to handle click on the link\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._click_handlers = CallbackDispatcher()\n self.on_msg(self._handle_button_msg)\n\n # this is necessary when data is big and it content should not be set at when widget is initialized\n def on_click(self, callback, remove=False):\n \"\"\"Register a callback to execute when the button is clicked.\n The callback will be called with one argument, the clicked button\n widget instance.\n Parameters\n ----------\n remove: bool (optional)\n Set to true to remove the callback from the list of callbacks.\n \"\"\"\n self._click_handlers.register_callback(callback, remove=remove)\n\n def click(self):\n \"\"\"Programmatically trigger a click event.\n This will call the callbacks registered to the clicked button\n widget instance.\n \"\"\"\n self._click_handlers(self)\n\n def _handle_button_msg(self, _, content, buffers):\n \"\"\"Handle a msg from the front-end.\n Parameters\n ----------\n content: dict\n Content of the msg.\n \"\"\"\n if content.get('event', '') == 'click':\n self.click()\n","repo_name":"NeuroLang/neurolang_ipywidgets","sub_path":"neurolang_ipywidgets/cell_widgets.py","file_name":"cell_widgets.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"30928303869","text":"from typing import Tuple\n\nfrom .Users import UserList\nfrom ..utils.Logger import getServerLogger\nfrom ..utils.Middleware import Middleware\n\nlogger = getServerLogger(\"P2PMiddleware\")\n\n\nclass P2PMiddleware(Middleware):\n def __init__(self, users: UserList, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.users = users\n\n self.handlers = {\"addr_request\": self.get_user_addr}\n\n def get_user_addr(self, sid: str, data: dict) -> Tuple[bool, dict]:\n \"\"\"\n Si llega un evento de tipo addr_request,\n se obtiene la address del usuario de destino y se retorna\n \"\"\"\n\n uri, uuid = None, None\n user = self.users.get_user_by_name(data[\"username\"])\n if user:\n uri, uuid = user.uri, user.uuid\n\n # Este middleware no tiene que seguir avanzando, por lo que se retorna False\n # (La respuesta ya esta completa)\n return False, {\"uri\": uri, \"uuid\": uuid}\n","repo_name":"MrEarle/SSDD-T4","sub_path":"src/server/P2PMiddleware.py","file_name":"P2PMiddleware.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"12897685903","text":"import time\nstart = time.time()\ndef seperator(l):\n moves=0\n i=0\n j=1\n l1=l.copy()\n while(jl1[j]):\n i=j\n j+=1\n \n elif(l1[i]l2[j]):\n temp=l2[i]\n l2[i]=l2[j]\n l2[j]=temp\n moves1+=(j-i)\n i+=1\n j+=1\n \n return (l1,moves,l2,moves1)\n \nprint(seperator([1,0,1,1,0,1,1]))\nend=time.time()\nprint()\nprint()\nprint(end-start)\n","repo_name":"DoWithPassion/PythonPrograms","sub_path":"swaps_to_seperate_0s_and1s.py","file_name":"swaps_to_seperate_0s_and1s.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"31711927770","text":"import math\nimport time\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.ui import Select\n\nbrowser = webdriver.Chrome(executable_path=ChromeDriverManager().install())\nlink = \"http://suninjuly.github.io/explicit_wait2.html\"\n\n\ndef calc(x):\n return str(math.log(abs(12 * math.sin(int(x)))))\n\ntry:\n browser.get(link)\n price = browser.find_element_by_id('price').text\n my_price = WebDriverWait(browser, 12).until(\n EC.text_to_be_present_in_element(\n (By.ID, \"price\"), \"$100\")\n )\n book = browser.find_element_by_id('book')\n book.click()\n x = int(browser.find_element_by_id('input_value').text)\n my_answer = calc(x)\n browser.find_element_by_id('answer').send_keys(my_answer)\n button = browser.find_element_by_id(\"solve\")\n button.click()\n\nfinally:\n # успеваем скопировать код за 10 секунд\n time.sleep(10)\n # закрываем браузер после всех манипуляций\n browser.quit()\n","repo_name":"AkioYuki/stepik_auto_tests_course","sub_path":"2.4.1.py","file_name":"2.4.1.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27610145827","text":"from thunderstore.community.models import Community, CommunitySite\nfrom thunderstore.core.utils import capture_exception\nfrom thunderstore.repository.api.v1.viewsets import serialize_package_list_for_community\nfrom thunderstore.repository.models.cache import APIV1PackageCache\n\n\ndef update_api_v1_caches() -> None:\n update_api_v1_indexes()\n\n\ndef update_api_v1_indexes() -> None:\n for site in CommunitySite.objects.iterator():\n try:\n APIV1PackageCache.update_for_community(\n community=site.community,\n content=serialize_package_list_for_community(\n community=site.community,\n ),\n )\n except Exception as e: # pragma: no cover\n capture_exception(e)\n for community in Community.objects.filter(sites=None).iterator():\n try:\n APIV1PackageCache.update_for_community(\n community=community,\n content=serialize_package_list_for_community(\n community=community,\n ),\n )\n except Exception as e: # pragma: no cover\n capture_exception(e)\n APIV1PackageCache.drop_stale_cache()\n","repo_name":"thunderstore-io/Thunderstore","sub_path":"django/thunderstore/repository/api/v1/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"77"}
+{"seq_id":"29340036529","text":"#Conversión de Decimal a Binario\n#reciba como entrada un número decimal e imprima el resultado de convertirlo a binario. \ndecimal= int(input(\"ingrese numero decimal: \"))\nlista_modulos = [ ]\nwhile decimal != 0:\n modulo = decimal % 2\n cociente = decimal // 2\n # print(\"COCIENTE::: \"+ str(cociente))\n lista_modulos.append(str(modulo))\n decimal = cociente\nprint(\"resultado=\" + \"\".join(lista_modulos[::-1]))\n","repo_name":"pabloschwarzenberg/grader","sub_path":"hito1_ej4/hito1_ej4_edcf279b44328751cac4ce5efce99364.py","file_name":"hito1_ej4_edcf279b44328751cac4ce5efce99364.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"7880903155","text":"import os\nimport sys\nimport numpy as np\nimport pytest\n\nimport tvm\nimport tvm.relay.testing\nimport tvm.relay.transform as transform\nfrom tvm import relay\nfrom tvm.contrib import util\nfrom tvm.relay.annotation import compiler_begin, compiler_end\nfrom tvm.relay.expr_functor import ExprMutator\n\n# Leverage the pass manager to write a simple white list based annotator\n@transform.function_pass(opt_level=0)\nclass WhiteListAnnotator:\n def __init__(self, op_list, compiler):\n assert isinstance(op_list, (list, tuple, set))\n self.op_list = op_list\n self.compiler = compiler\n\n def transform_function(self, func, mod, ctx):\n\n annotator = self\n class Annotator(tvm.relay.ExprMutator):\n def visit_call(self, call):\n op_name = call.op.name\n if op_name in annotator.op_list:\n new_args = []\n for arg in call.args:\n ann = compiler_begin(super().visit(arg),\n annotator.compiler)\n new_args.append(ann)\n new_call = relay.Call(call.op, new_args, call.attrs,\n call.type_args)\n return compiler_end(new_call, annotator.compiler)\n else:\n return super().visit_call(call)\n return Annotator().visit(func)\n\n\nclass CcompilerAnnotator(ExprMutator):\n \"\"\"\n A simple annotator that creates the following program:\n |\n -- begin --\n |\n add\n |\n subtract\n |\n multiply\n |\n -- end --\n |\n \"\"\"\n\n def __init__(self):\n super(CcompilerAnnotator, self).__init__()\n self.in_compiler = 0\n\n def visit_call(self, call):\n if call.op.name == \"add\": # Annotate begin at args\n if self.in_compiler == 1:\n lhs = compiler_begin(super().visit(call.args[0]), \"ccompiler\")\n rhs = compiler_begin(super().visit(call.args[1]), \"ccompiler\")\n op = relay.add(lhs, rhs)\n self.in_compiler = 2\n return op\n elif call.op.name == \"subtract\":\n if self.in_compiler == 1:\n lhs = super().visit(call.args[0])\n rhs = super().visit(call.args[1])\n if isinstance(lhs, relay.expr.Var):\n lhs = compiler_begin(lhs, \"ccompiler\")\n if isinstance(rhs, relay.expr.Var):\n rhs = compiler_begin(rhs, \"ccompiler\")\n return relay.subtract(lhs, rhs)\n elif call.op.name == \"multiply\": # Annotate end at output\n self.in_compiler = 1\n lhs = super().visit(call.args[0])\n rhs = super().visit(call.args[1])\n if isinstance(lhs, relay.expr.Var):\n lhs = compiler_begin(lhs, \"ccompiler\")\n if isinstance(rhs, relay.expr.Var):\n rhs = compiler_begin(rhs, \"ccompiler\")\n op = relay.multiply(lhs, rhs)\n if self.in_compiler == 2:\n op = compiler_end(op, \"ccompiler\")\n self.in_compiler = 0\n return op\n return super().visit_call(call)\n\n\nclass WholeGraphAnnotator(ExprMutator):\n \"\"\"\n An annotator that creates a compiler for an entire graph.\n \"\"\"\n\n def __init__(self, compiler):\n super(WholeGraphAnnotator, self).__init__()\n self.compiler = compiler\n self.last_call = True\n\n def visit_call(self, call):\n curr_last = self.last_call\n self.last_call = False\n\n params = []\n for arg in call.args:\n param = super().visit(arg)\n if isinstance(param, relay.expr.Var):\n param = compiler_begin(param, self.compiler)\n params.append(param)\n\n new_call = relay.Call(call.op, params, call.attrs)\n if curr_last:\n new_call = compiler_end(new_call, self.compiler)\n return new_call\n\n\nclass MobileNetAnnotator(ExprMutator):\n \"\"\"\n Annotate mobilenet until global_avg_pool.\n \"\"\"\n\n def __init__(self, compiler):\n super(MobileNetAnnotator, self).__init__()\n self.compiler = compiler\n self.compiler_open = False\n\n def visit_call(self, call):\n\n if call.op.name == 'nn.global_avg_pool2d':\n 
self.compiler_open = True\n compiler_open = self.compiler_open\n\n params = []\n for arg in call.args:\n param = super().visit(arg)\n if call.op.name == 'nn.global_avg_pool2d':\n param = compiler_end(param, self.compiler)\n if compiler_open and isinstance(param, relay.expr.Var):\n param = compiler_begin(param, self.compiler)\n params.append(param)\n\n new_call = relay.Call(call.op, params, call.attrs)\n return new_call\n\n\ndef check_result(mod, map_inputs, out_shape, result, tol=1e-5, target=\"llvm\",\n ctx=tvm.cpu(), params=None):\n if sys.platform == \"win32\":\n print(\"Skip test on Windows for now\")\n return\n\n def update_lib(lib):\n test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))\n source_dir = os.path.join(test_dir, \"..\", \"..\", \"..\")\n contrib_path = os.path.join(source_dir, \"src\", \"runtime\", \"contrib\")\n\n kwargs = {}\n kwargs[\"options\"] = [\"-O2\", \"-std=c++11\", \"-I\" + contrib_path]\n tmp_path = util.tempdir()\n lib_name = 'lib.so'\n lib_path = tmp_path.relpath(lib_name)\n lib.export_library(lib_path, fcompile=False, **kwargs)\n lib = tvm.module.load(lib_path)\n\n return lib\n\n def check_vm_result():\n with relay.build_config(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n exe = relay.vm.compile(mod, target=target, params=params)\n code, lib = exe.save()\n lib = update_lib(lib)\n exe = relay.vm.Executable.load_exec(code, lib)\n vm = relay.vm.VirtualMachine(exe)\n vm.init(ctx)\n out = vm.run(**map_inputs)\n tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)\n\n def check_graph_runtime_result():\n with relay.build_config(opt_level=3, disabled_pass=[\"AlterOpLayout\"]):\n json, lib, param = relay.build(mod, target=target, params=params)\n lib = update_lib(lib)\n rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx)\n\n for name, data in map_inputs.items():\n rt_mod.set_input(name, data)\n rt_mod.set_input(**param)\n rt_mod.run()\n out = tvm.nd.empty(out_shape, ctx=ctx)\n out = rt_mod.get_output(0, out)\n\n tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol)\n\n check_vm_result()\n check_graph_runtime_result()\n\n\ndef test_multi_node_compiler():\n x = relay.var('x', shape=(10, 10))\n w0 = relay.var('w0', shape=(10, 10))\n w1 = relay.var('w1', shape=(10, 10))\n w2 = relay.var('w2', shape=(10, 10))\n w3 = relay.var('w3', shape=(10, 10))\n w4 = relay.var('w4', shape=(10, 10))\n w5 = relay.var('w5', shape=(10, 10))\n w6 = relay.var('w6', shape=(10, 10))\n w7 = relay.var('w7', shape=(10, 10))\n\n # C compiler\n # FIXME: We generate two compilers for this case but they should be merged to one\n # due to the common input (x).\n z0 = relay.add(x, w0)\n p0 = relay.subtract(z0, w1)\n q0 = relay.multiply(p0, w2)\n\n z1 = relay.add(x, w3)\n p1 = relay.subtract(z1, w4)\n q1 = relay.multiply(p1, w5)\n\n # Other parts on TVM\n z2 = relay.add(x, w6)\n q2 = relay.subtract(z2, w7)\n\n r = relay.concatenate((q0, q1, q2), axis=0)\n f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)\n mod = relay.Module()\n ann = CcompilerAnnotator()\n mod[\"main\"] = ann.visit(f)\n mod = transform.PartitionGraph()(mod)\n mod = transform.InferType()(mod)\n\n x_data = np.random.rand(10, 10).astype('float32')\n w_data = []\n for _ in range(8):\n w_data.append(np.random.rand(10, 10).astype('float32'))\n\n map_inputs = {\"w{}\".format(i): w_data[i] for i in range(8)}\n map_inputs[\"x\"] = x_data\n check_result(\n mod, map_inputs, (30, 10),\n np.concatenate((((x_data + w_data[0]) - w_data[1]) * w_data[2],\n ((x_data + 
w_data[3]) - w_data[4]) * w_data[5],\n x_data + w_data[6] - w_data[7]),\n axis=0))\n\n\ndef test_extern_ccompiler_single_op():\n @transform.function_pass(opt_level=0)\n class MyAnnotator:\n def transform_function(self, func, mod, ctx):\n class Annotator(tvm.relay.ExprMutator):\n def visit_call(self, call):\n new_args = []\n for arg in call.args:\n ann = compiler_begin(self.visit(arg), \"ccompiler\")\n new_args.append(ann)\n new_call = relay.Call(call.op, new_args)\n return compiler_end(new_call, \"ccompiler\")\n return Annotator().visit(func)\n\n x = relay.var('x', shape=(8, 8))\n y = relay.var('y', shape=(8, 8))\n z = x + y\n f = relay.Function([x, y], z)\n x_data = np.random.rand(8, 8).astype('float32')\n y_data = np.random.rand(8, 8).astype('float32')\n mod = relay.Module()\n mod[\"main\"] = f\n mod = MyAnnotator()(mod)\n mod = transform.PartitionGraph()(mod)\n\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (8, 8), x_data + y_data)\n\n\ndef test_extern_ccompiler_default_ops():\n def expected():\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n x0 = relay.var(\"x0\", shape=(8, 8))\n y0 = relay.var(\"y0\", shape=(8, 8))\n add = x0 + y0\n # Function that uses C compiler\n func = relay.Function([x0, y0], add)\n func = func.set_attribute(\"Primitive\", tvm.expr.IntImm(\"int32\", 1))\n func = func.set_attribute(\"Compiler\",\n tvm.expr.StringImm(\"ccompiler\"))\n func = func.set_attribute(\"ExternalSymbol\",\n tvm.expr.StringImm(\"ccompiler_0\"))\n add_call = relay.Call(func, [x, y])\n # Function that uses default compiler. Ops are fused in this function.\n p0 = relay.var(\"p0\", shape=(8, 8))\n log = relay.log(p0)\n exp = relay.exp(p0)\n concat = relay.concatenate([log, exp], axis=0)\n fused_func = relay.Function([p0], concat)\n fused_func = fused_func.set_attribute(\"Primitive\",\n tvm.expr.IntImm(\"int32\", 1))\n fused_call = relay.Call(fused_func, [add_call])\n main = relay.Function([x, y], fused_call)\n mod = relay.Module()\n mod[\"main\"] = main\n return mod\n\n x = relay.var(\"x\", shape=(8, 8))\n y = relay.var(\"y\", shape=(8, 8))\n add = x + y\n log = relay.log(add)\n exp = relay.exp(add)\n concat = relay.concatenate([log, exp], axis=0)\n f = relay.Function([x, y], concat)\n mod = relay.Module()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n\n fused_mod = transform.FuseOps(2)(mod)\n expected_mod = expected()\n assert relay.alpha_equal(fused_mod, expected_mod)\n\n x_data = np.random.rand(8, 8).astype('float32')\n y_data = np.random.rand(8, 8).astype('float32')\n np_add = x_data + y_data\n res = np.concatenate([np.log(np_add), np.exp(np_add)])\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (16, 8), res)\n\n\ndef test_extern_ccompiler():\n x = relay.var('x', shape=(2, 2))\n y = relay.var('y', shape=(2, 2))\n z = x + x\n p = y * y\n f = relay.Function([x, y], p - z)\n x_data = np.random.rand(2, 2).astype('float32')\n y_data = np.random.rand(2, 2).astype('float32')\n mod = relay.Module()\n mod[\"main\"] = f\n mod = WhiteListAnnotator([\"add\", \"subtract\", \"multiply\"], \"ccompiler\")(mod)\n mod = transform.PartitionGraph()(mod)\n\n check_result(mod, {\"x\": x_data, \"y\": y_data}, (2, 2), (y_data * y_data) - (x_data + x_data))\n\n\ndef test_extern_dnnl():\n if not tvm.get_global_func(\"relay.ext.dnnl\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = 'float32'\n ishape = (1, 32, 14, 14)\n w1shape = (32, 1, 3, 3)\n 
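# editor note: the test graph below chains two depthwise convolutions that share one\n    # weight tensor and then adds their outputs, so the partitioned DNNL region has to\n    # handle both a reused parameter and a residual-style add.\n    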
data = relay.var('data', shape=(ishape), dtype=dtype)\n weight1 = relay.var('weight1', shape=(w1shape), dtype=dtype)\n depthwise_conv2d_1 = relay.nn.conv2d(data,\n weight1,\n kernel_size=(3, 3),\n padding=(1, 1),\n groups=32)\n depthwise_conv2d_2 = relay.nn.conv2d(depthwise_conv2d_1,\n weight1,\n kernel_size=(3, 3),\n padding=(1, 1),\n groups=32)\n out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)\n\n f = relay.Function([data, weight1], out)\n\n mod = relay.Module()\n mod['main'] = WholeGraphAnnotator('dnnl').visit(f)\n mod = transform.PartitionGraph()(mod)\n\n ref_mod = relay.Module()\n ref_mod['main'] = f\n\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)\n\n ref_ex = relay.create_executor(\"graph\", mod=ref_mod, ctx=tvm.cpu())\n ref_res = ref_ex.evaluate()(i_data, w1_data)\n check_result(mod, {\"data\": i_data, \"weight1\": w1_data},\n (1, 32, 14, 14), ref_res.asnumpy(), tol=1e-5)\n\n\ndef test_extern_dnnl_mobilenet():\n if not tvm.get_global_func(\"relay.ext.dnnl\", True):\n print(\"skip because DNNL codegen is not available\")\n return\n\n dtype = 'float32'\n ishape = (1, 3, 224, 224)\n mod, params = relay.testing.mobilenet.get_workload(\n batch_size=1, dtype='float32')\n\n op_list = [\"nn.conv2d\", \"nn.dense\", \"nn.relu\", \"add\"]\n mod = WhiteListAnnotator(op_list, \"dnnl\")(mod)\n mod = transform.PartitionGraph()(mod)\n i_data = np.random.uniform(0, 1, ishape).astype(dtype)\n\n ref_mod, params = relay.testing.mobilenet.get_workload(batch_size=1,\n dtype='float32')\n ref_ex = relay.create_executor(\"graph\", mod=ref_mod, ctx=tvm.cpu(0))\n ref_res = ref_ex.evaluate()(i_data, **params)\n\n check_result(mod, {\"data\": i_data},\n (1, 1000), ref_res.asnumpy(), tol=1e-5, params=params)\n\n\nif __name__ == \"__main__\":\n test_multi_node_compiler()\n test_extern_ccompiler_single_op()\n test_extern_ccompiler_default_ops()\n test_extern_ccompiler()\n test_extern_dnnl()\n test_extern_dnnl_mobilenet()\n","repo_name":"gary30404/tvm-yolov3","sub_path":"tests/python/relay/test_pass_partition_graph.py","file_name":"test_pass_partition_graph.py","file_ext":"py","file_size_in_byte":14780,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"}
+{"seq_id":"41567791867","text":"import wx\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport core.load as load\n\n\ndef readout_noise_process(mainFrame):\n gain = float(mainFrame.gain_rdnPage.rdn_textCtrl1.GetValue())\n nClip = int(mainFrame.gain_rdnPage.rdn_textCtrl3.GetValue())\n path = mainFrame.biasfilePath\n files = []\n\n if isinstance(path, str):\n # 输入路径为str,即目录\n for file in os.listdir(path):\n files.append(os.path.join(path, file))\n\n if isinstance(path, list):\n # 输入路径为list, 即文件名\n files = path\n\n # 判断nClip是否合适\n if len(files) <= 2 * nClip:\n dlg = wx.MessageDialog(None, \"请输入合适的nClip!\", caption=\"警告\", style=wx.OK)\n dlg.ShowModal()\n return\n\n tmp = []\n # arr = np.zeros(load.getData(MainFrame, files[0]).shape)\n for file in files:\n each_data = load.getData(mainFrame, file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n tmp = each_data\n else:\n tmp.append(each_data)\n\n # 全部图像堆叠\n data = np.array(tmp)\n # 剔除最大值和最小是\n data_sort = np.sort(data, axis=0)\n if nClip:\n data_clip = data_sort[nClip:-nClip, :, :]\n else:\n data_clip = data_sort\n # 计算N张本底图像各个像元的平均值\n data_mean = np.mean(data_clip, axis=0)\n # 计算N张本地图像各个像元的标准偏差作为该像元的读出噪声\n data_std = np.std(data_clip, axis=0)\n # 整个图像的平均读出噪声\n data_std_overall = np.mean(data_std)\n # 读出噪声结果(e-)\n res = gain * data_std_overall\n\n # 显示结果\n mainFrame.gain_rdnPage.rdn_textCtrl2.SetValue(str(round(res, 3)))\n\n # 合并后的fits图像的平均值和标准差分布作图, 以及直方图\n plt.figure(1)\n plt.title(\"MeanValue Distribution\")\n plt.axis('off')\n plt.imshow(data_mean, cmap=plt.cm.gray)\n\n plt.figure(2)\n plt.title(\"StdValue Distribution\")\n plt.imshow(data_std, cmap=plt.cm.gray)\n plt.axis('off')\n\n plt.figure(3)\n\n n, bins, patches = plt.hist(data_std.flatten(), bins='auto', color='steelblue')\n plt.title(\"Readout Noise Historgam\")\n plt.xlabel(\"Readout Noise (e-)\")\n plt.ylabel(\"Counts\")\n plt.tight_layout()\n\n plt.show()\n\n return\n\n\ndef gain_process(mainFrame):\n bias_path = mainFrame.biasfilePath\n flat_path = mainFrame.flatfilePath\n bias_files = []\n flat_files = []\n if isinstance(bias_path, str):\n # 输入路径为str,即目录\n for bias_file in os.listdir(bias_path):\n bias_files.append(os.path.join(bias_path, bias_file))\n\n if isinstance(bias_path, list):\n # 输入路径为list, 即文件名\n bias_files = bias_path\n\n if isinstance(flat_path, str):\n # 输入路径为str,即目录\n for flat_file in os.listdir(flat_path):\n flat_files.append(os.path.join(flat_path, flat_file))\n\n if isinstance(flat_path, list):\n # 输入路径为list, 即文件名\n flat_files = flat_path\n\n bias_tmp = []\n flat_tmp = []\n\n # 读取本底场图像\n for bias_file in bias_files:\n each_data = load.getData(mainFrame, bias_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n bias_tmp = each_data\n else:\n bias_tmp.append(each_data)\n # 读取平场图像\n for flat_file in flat_files:\n each_data = load.getData(mainFrame, flat_file)\n # 多个三维fits 堆叠待解决\n if each_data.ndim > 2:\n # arr = np.concatenate((arr, each_data))\n flat_tmp = each_data\n else:\n flat_tmp.append(each_data)\n # 图像堆叠转为array\n bias_data = np.array(bias_tmp, dtype=float)\n flat_data = np.array(flat_tmp, dtype=float)\n\n # 选择两幅平均值最接近的图像\n bias1, bias2 = load.im_select(bias_data)\n flat1, flat2 = load.im_select(flat_data)\n\n bias_diff = bias1 - bias2\n flat_diff = flat1 - flat2\n\n bias_dif_var = np.var(bias_diff)\n flat_diff_var = np.var(flat_diff)\n\n # 计算增益\n gain = (np.mean(flat1) + np.mean(flat2) - np.mean(bias1) - np.mean(bias2)) / (flat_diff_var - 
bias_dif_var)\n\n    # display the result\n    mainFrame.gain_rdnPage.gain_textCtrl.SetValue(str(round(gain, 3)))\n    return\n\n\ndef ptc_process(mainFrame):\n    bias_path = mainFrame.biasfilePath\n    flat_path = mainFrame.flatfilePath\n    bias_files = []\n    flat_files = []\n    if isinstance(bias_path, str):\n        # input path is a str, i.e. a directory\n        for bias_file in os.listdir(bias_path):\n            bias_files.append(os.path.join(bias_path, bias_file))\n\n    if isinstance(bias_path, list):\n        # input path is a list, i.e. file names\n        bias_files = bias_path\n\n    if isinstance(flat_path, str):\n        # input path is a str, i.e. a directory\n        for flat_file in os.listdir(flat_path):\n            flat_files.append(os.path.join(flat_path, flat_file))\n\n    if isinstance(flat_path, list):\n        # input path is a list, i.e. file names\n        flat_files = flat_path\n\n    bias_tmp = []\n    flat_tmp = []\n\n    # read the bias frames\n    for bias_file in bias_files:\n        each_data = load.getData(mainFrame, bias_file)\n        # stacking of multiple 3-D fits files still to be resolved\n        if each_data.ndim > 2:\n            # arr = np.concatenate((arr, each_data))\n            bias_tmp = each_data\n        else:\n            bias_tmp.append(each_data)\n    # stack the frames into an array\n    bias_data = np.array(bias_tmp, dtype=float)\n    # pick the two frames whose mean values are closest\n    bias1, bias2 = load.im_select(bias_data)\n    bias_mean = (np.mean(bias1) + np.mean(bias2)) / 2\n    bias_var = np.var(bias1 - bias2) / 2\n\n    # read the flat-field frames\n    PTC_arr = np.zeros((2, len(flat_files)))\n    index = 0\n\n    for flat_file in flat_files:\n        flat_tmp = []\n        if os.path.isdir(flat_file):\n            for item in os.listdir(flat_file):\n                tmp_path = os.path.join(flat_file, item)\n                flat_tmp.append(load.getData(mainFrame, tmp_path))\n        else:\n            each_data = load.getData(mainFrame, flat_file)\n            # stacking of multiple 3-D fits files still to be resolved\n            if each_data.ndim > 2:\n                # arr = np.concatenate((arr, each_data))\n                flat_tmp = each_data\n            else:\n                flat_tmp.append(each_data)\n        flat_data = np.array(flat_tmp, dtype=float)\n        flat1, flat2 = load.im_select(flat_data)\n        flat_mean = (np.mean(flat1) + np.mean(flat2)) / 2\n        flat_var = np.var(flat1 - flat2) / 2\n\n        PTC_arr[0, index] = flat_mean - bias_mean\n        PTC_arr[1, index] = flat_var - bias_var\n        index += 1\n\n    # locate the turnover point\n    argMax = PTC_arr[1, :].argmax()\n    PTC_arr_fit = PTC_arr[:, :argMax + 1]\n\n    # linear fit\n    X_fit = PTC_arr_fit[0, :]\n    Y_fit = PTC_arr_fit[1, :]\n    Z_fit = np.polyfit(X_fit, Y_fit, 1)\n    # original data\n    X_origin = PTC_arr[0, :argMax + 1]\n    Y_origin = PTC_arr[1, :argMax + 1]\n\n    # Z_fit[0] is the slope of the curve, so 1/Z_fit[0] is the gain\n    gain = 1 / Z_fit[0]\n    readout_noise = np.sqrt(bias_var) * gain\n    fullWellCapacity = gain * PTC_arr[0, argMax]\n\n    # PTC non-linearity\n    p = np.poly1d(Z_fit)\n    delta = []\n    for i in range(len(X_origin)):\n        delta.append((Y_origin[i] - p(X_origin[i])) / p(X_origin[i]))\n    delta = np.array(delta, dtype=float)\n    none_linearity = np.mean(np.abs(delta))\n\n    # response non-linearity\n    maxExposure = float(mainFrame.ptcPage.ptc_textCtrl1.GetValue())\n    length = PTC_arr.shape[1]\n    arr_exposure = np.linspace(0, maxExposure, length)\n    coord_max = PTC_arr[1, :].argmax()\n    response_x = arr_exposure[:coord_max + 1]\n    response_y = PTC_arr[0, :coord_max + 1]\n    response_fit = np.polyfit(response_x, response_y, 1)\n    response_poly = np.poly1d(response_fit)\n    response_yp = response_poly(response_x)\n    res_non_linearity = np.mean((response_y - response_yp) / response_yp)\n\n    # display the results\n    mainFrame.ptcPage.ptc_textCtrl3.SetValue(str(round(gain, 4)))\n    mainFrame.ptcPage.ptc_textCtrl4.SetValue(str(round(readout_noise, 4)))\n    mainFrame.ptcPage.ptc_textCtrl5.SetValue(str(round(fullWellCapacity, 2)))\n    mainFrame.ptcPage.ptc_textCtrl6.SetValue(str(np.abs(round(none_linearity, 2))))\n    mainFrame.ptcPage.ptc_textCtrl7.SetValue(str(np.abs(round(res_non_linearity, 2))))\n\n    # plotting\n    if mainFrame.ptcPage.ptc_plot_trigger.GetValue():\n        # PTC\n        
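# editor note: four diagnostic figures follow, one per plt.figure call below: the PTC\n        # curve with its linear fit, the PTC non-linearity, the exposure response with its\n        # fit, and the response non-linearity.\n        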
plt.figure(1)\n        l1, = plt.plot(PTC_arr[0, :], PTC_arr[1, :], 'b*', label='Original Data')\n        l2, = plt.plot(X_origin, p(X_origin), 'r--', label='Fit Curve')\n        plt.legend(loc='best')\n        plt.xlabel('Mean(ADU)')\n        plt.ylabel('Variance')\n        plt.legend(loc='best')\n        plt.title('PTC')\n\n        # PTC non-linearity\n        plt.figure(2)\n        l1, = plt.plot(X_origin, delta, 'b*', label='Non-linearity')\n        l2, = plt.plot(X_origin, np.zeros(len(X_origin)), 'r--', label='zero line')\n        plt.legend(loc='best')\n        plt.xlabel('Mean(ADU)')\n        plt.ylabel('Non-linearity')\n        plt.title('PTC Non-linearity')\n\n        # PTC response\n        plt.figure(3)\n        l1, = plt.plot(arr_exposure, PTC_arr[0, :], \"b*\", label='Original Data')\n        l2, = plt.plot(response_x, response_yp, 'r--', label='Fit Curve')\n        plt.legend(loc='best')\n        plt.xlabel('Exposure Time / (s)')\n        plt.ylabel('Signal / MeanValue')\n        plt.title('PTC Response')\n\n        # PTC response non-linearity\n        plt.figure(4)\n        l1, = plt.plot(response_x, (response_y - response_yp) / response_yp, 'b*', label='Non-linearity')\n        l2, = plt.plot(response_x, np.zeros(len(response_x)), 'r--', label='zero line')\n        plt.legend(loc='best')\n        plt.xlabel('Exposure Time / (s)')\n        plt.ylabel('Non-linearity')\n        plt.title('Response Non-linearity')\n\n        plt.show()\n    return\n\n\ndef dark_current_process(mainFrame):\n    gain = float(mainFrame.darkcurrentPage.dc_textCtrl1.GetValue())\n    time = float(mainFrame.darkcurrentPage.dc_textCtrl2.GetValue())\n    bias_path = mainFrame.biasfilePath\n    dark_path = mainFrame.darkfilePath\n    bias_files = []\n    dark_files = []\n\n    if isinstance(bias_path, str):\n        # input path is a str, i.e. a directory\n        for bias_file in os.listdir(bias_path):\n            bias_files.append(os.path.join(bias_path, bias_file))\n\n    if isinstance(bias_path, list):\n        # input path is a list, i.e. file names\n        bias_files = bias_path\n\n    if isinstance(dark_path, str):\n        # input path is a str, i.e. a directory\n        for dark_file in os.listdir(dark_path):\n            dark_files.append(os.path.join(dark_path, dark_file))\n\n    if isinstance(dark_path, list):\n        # input path is a list, i.e. file names\n        dark_files = dark_path\n\n    bias_tmp = []\n    dark_tmp = []\n    # arr = np.zeros(load.getData(MainFrame, files[0]).shape)\n    # read the bias frames\n    for bias_file in bias_files:\n        each_data = load.getData(mainFrame, bias_file)\n        # stacking of multiple 3-D fits files still to be resolved\n        if each_data.ndim > 2:\n            # arr = np.concatenate((arr, each_data))\n            bias_tmp = each_data\n        else:\n            bias_tmp.append(each_data)\n    # read the dark frames\n    for dark_file in dark_files:\n        each_data = load.getData(mainFrame, dark_file)\n        # stacking of multiple 3-D fits files still to be resolved\n        if each_data.ndim > 2:\n            # arr = np.concatenate((arr, each_data))\n            dark_tmp = each_data\n        else:\n            dark_tmp.append(each_data)\n    # stack the frames into arrays\n    bias_data = np.array(bias_tmp)\n    dark_data = np.array(dark_tmp)\n\n    bias_mean = load.im_combine(bias_data)\n    dark_mean = load.im_combine(dark_data)\n\n    # mean dark current\n    res_darkCurrent = np.mean(dark_mean - bias_mean)\n\n    # find hot pixels\n    # take the first dark frame\n    dark_firstFrame = dark_data[0, :, :]\n    dark_res = dark_firstFrame - bias_mean\n    # compute the threshold\n    threshold_val = np.mean(dark_res) + 25\n    # number of hot pixels\n    coords = np.where(dark_res >= threshold_val)\n    hot_pixel_num = coords[0].shape[0]\n    count_row = np.unique(coords[0], return_counts=True)\n    count_col = np.unique(coords[1], return_counts=True)\n    # defective rows and columns of hot pixels\n    hot_pixel_row = count_row[0][np.where(count_row[1] > 100)]\n    hot_pixel_col = count_col[0][np.where(count_col[1] > 100)]\n\n    # fraction of pixels above 4x the typical dark-current value\n    coord_hotPixel_over4x = np.where(dark_res > 4 * res_darkCurrent)\n    ratio_over4x = coord_hotPixel_over4x[0].shape[0] / dark_res.size\n\n    # display the results\n    mainFrame.darkcurrentPage.dc_textCtrl3.SetValue(str(round(res_darkCurrent, 6) / time * gain))\n    
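# editor note: the value written above is the dark current in e-/s, i.e. the mean\n    # dark ADU per frame scaled by gain (assumed to be in e-/ADU) over the exposure time.\n    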
mainFrame.darkcurrentPage.dc_textCtrl4.SetValue(str(hot_pixel_num))\n    mainFrame.darkcurrentPage.dc_textCtrl5.SetValue(str(round(ratio_over4x, 5)))\n    mainFrame.darkcurrentPage.dc_textCtrl6.SetValue(\",\".join(map(str, hot_pixel_row)))\n    # NOTE: this second write overwrites the row list set just above; a separate control is probably intended\n    mainFrame.darkcurrentPage.dc_textCtrl6.SetValue(\",\".join(map(str, hot_pixel_col)))\n    return\n\n\ndef prnu_process(mainFrame):\n    bias_path = mainFrame.biasfilePath\n    flat_path = mainFrame.flatfilePath\n    bias_files = []\n    flat_files = []\n    if isinstance(bias_path, str):\n        # input path is a str, i.e. a directory\n        for bias_file in os.listdir(bias_path):\n            bias_files.append(os.path.join(bias_path, bias_file))\n\n    if isinstance(bias_path, list):\n        # input path is a list, i.e. file names\n        bias_files = bias_path\n\n    if isinstance(flat_path, str):\n        # input path is a str, i.e. a directory\n        for flat_file in os.listdir(flat_path):\n            flat_files.append(os.path.join(flat_path, flat_file))\n\n    if isinstance(flat_path, list):\n        # input path is a list, i.e. file names\n        flat_files = flat_path\n\n    bias_tmp = []\n    flat_tmp = []\n\n    # read the bias frames\n    for bias_file in bias_files:\n        each_data = load.getData(mainFrame, bias_file)\n        # stacking of multiple 3-D fits files still to be resolved\n        if each_data.ndim > 2:\n            # arr = np.concatenate((arr, each_data))\n            bias_tmp = each_data\n        else:\n            bias_tmp.append(each_data)\n    # read the flat-field frames\n    for flat_file in flat_files:\n        each_data = load.getData(mainFrame, flat_file)\n        # stacking of multiple 3-D fits files still to be resolved\n        if each_data.ndim > 2:\n            # arr = np.concatenate((arr, each_data))\n            flat_tmp = each_data\n        else:\n            flat_tmp.append(each_data)\n    # stack the frames into arrays\n    bias_data = np.array(bias_tmp, dtype=float)\n    flat_data = np.array(flat_tmp, dtype=float)\n\n    # pick the two frames whose mean values are closest\n    bias1, bias2 = load.im_select(bias_data)\n    flat1, flat2 = load.im_select(flat_data)\n\n    bias_diff = bias1 - bias2\n    flat_diff = flat1 - flat2\n\n    bias_dif_var = np.var(bias_diff)\n    flat_diff_var = np.var(flat_diff)\n\n    res_prnu = np.sqrt(2) * np.sqrt(\n        np.var(flat1) + np.var(flat2) - np.var(bias1) - np.var(bias2) - (flat_diff_var - bias_dif_var)) \\\n        / (np.mean(flat1) + np.mean(flat2) - np.mean(bias1) - np.mean(bias2))\n\n    mainFrame.prnuPage.prnu_textCtrl1.SetValue(str(round(res_prnu, 4)))\n    return\n","repo_name":"bitursa/CCD-Data-Process","sub_path":"core/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":15309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"72799230649","text":"'''\nWhile DFS uses a set, for BFS we use a queue\nFor each items that we pop off the queue, we find its unvisied neighbors and add them to the end of the queue\n'''\n\nfrom collections import deque\n\ndef BFS(graph,start,visited={}):\n queue = deque([start])\n\n while queue:\n vertex = queue.popleft()\n visited.add(vertex)\n for neighbor in graph[vertex]:\n if neighbor not in visited:\n queue.append(neighbor)\n \n return visited\n","repo_name":"LennyGonz/LeetCode-Questions","sub_path":"Patterns/Tree_Breadth_First_Search/0_Understand_BFS.py","file_name":"0_Understand_BFS.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"2885573106","text":"\"\"\"\nInsertion sort\n\n\nInsertion sort involves finding the right place for a given element in a sorted list. So in beginning we compare the first two elements \nand sort them by comparing them. Then we pick the third element and find its proper position among the previous two sorted elements. \nThis way we gradually go on adding more elements to the already sorted list by putting them in their proper position.\n\n\"\"\"\n\"\"\"\npseudo code\n\nprocedure insertionSort( A : array of items )\nint holePosition\n int valueToInsert\n\t\n for i = 1 to length(A) inclusive do:\n\t\n /* select value to be inserted */\n valueToInsert = A[i]\n holePosition = i\n \n /*locate hole position for the element to be inserted */\n\t\t\n while holePosition > 0 and A[holePosition-1] > valueToInsert do:\n A[holePosition] = A[holePosition-1]\n holePosition = holePosition -1\n end while\n\t\t\n /* insert the number at hole position */\n A[holePosition] = valueToInsert\n \n end for\n\t\nend procedure\n\"\"\"\n\n\n#Algorithm\ndef insertion_sort(InputList):\n for i in range(1, len(InputList)):\n j = i-1\n nxt_element = InputList[i]\n\t\t\n while (InputList[j] > nxt_element) and (j >= 0):\n InputList[j+1] = InputList[j]\n j=j-1\n InputList[j+1] = nxt_element\n\nunsorted_list = []\nnum= int(input(\"Enter number of elements\"))\n\nprint(f'Enter {num} elements')\n\nfor i in range(num):\n data = int(input(f'{i+1}. '))\n unsorted_list.append(data)\nprint(f'Unsorted list is{unsorted_list}')\ninsertion_sort(list)\nprint(f'Sorted list is {merge_sort(unsorted_list)}')\n","repo_name":"DSCTOCE/Algorithms","sub_path":"insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"77"}
+{"seq_id":"37130806345","text":"def selection(x):\n n = len(x)\n for i in range(0, n-1):\n ordenado = True\n for j in range(i+1, n):\n if x[i]>x[j]:\n x[i], x[j] = x[j], x[i]\n ordenado = False\n if ordenado:\n return x\n return x\nx = [2, 7, 8, 1, 3, 6]\nprint(\"Fim do algoritmo -\", selection(x))\n","repo_name":"marceloarantes19/estruturaDeDados2022","sub_path":"ordenacaoNaoOtima/selectionSort2.py","file_name":"selectionSort2.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"77"}
+{"seq_id":"9699146789","text":"import requests # , time, xml\nfrom bs4 import BeautifulSoup\nimport datetime as dt\nimport re, feedparser\n\n\ndef fix_string(text, k):\n text = text.strip()\n pattern = r'[a-z][A-Z]|\\d[A-Z]|[а-я][А-Я]|[а-я][a-z]|\\d[a-z]|\\d[а-я]|[а-я][A-Z]|[a-z][а-я]'\n result_find = re.findall(pattern, text)\n for match in result_find:\n text = text.replace(match, ' '.join(match))\n if 'https://www.kommersant.ru' in text and 'Коммерсант' in k:\n text = text.split('https://www.kommersant.ru')[0]\n if text.startswith('Темная Удмуртия:'):\n text = text.lstrip('Темная Удмуртия:')\n text = text.strip()\n return text\n\ndef feeding_date(url):\n feeds = feedparser.parse(url)\n\n working_date = dt.datetime.today() - dt.timedelta(7)\n i = 1\n for feed in feeds.entries:\n datetime_object = dt.datetime.strptime(str(feed.published).rstrip(\" GMT\"), '%a, %d %b %Y %H:%M:%S')\n # published_date = dt.datetime.today().strftime('%a, %d %B %Y %H:%M:%S')\n if datetime_object < working_date:\n break\n i += 1\n return i\n\ndef parsing_udm_gov(url):\n i = 0\n udm_news = []\n for k, v in url.items():\n count = feeding_date(v)\n resp = requests.get(v)\n soup_gov = BeautifulSoup(resp.text, 'lxml')\n description_news = []\n for tagD in soup_gov.find_all('description')[:count]:\n content_news = fix_string(tagD.text, k)\n if 100 < len(content_news) and content_news.startswith('Forwarded From') == False:\n\n if 'Коммерсант' in k and 'бизнес-завтрак' not in content_news \\\n and 'бизнес-пикник' not in content_news \\\n and 'кругл' not in content_news \\\n and 'стол' not in content_news:\n # print(k)\n i += 1\n description_news.append(content_news)\n elif 'kommersant' not in content_news and 'Коммерсант' not in k:\n i += 1\n description_news.append(content_news)\n\n\n\n\n for descript in description_news:\n descript = descript.replace('\"', '\\'').rstrip(']]>')\n article = (descript[:35] + '...') # if len(descript) > 75 else (descript[:25] + '...')\n pos = article.find('http')\n article = article[:pos - 1] + '...'\n article = re.sub(r'http[^(\\s)]+', '...', article)\n date = dt.datetime.now().strftime(\"%d-%m-%y %H:%M:%S\")\n content = (article, descript, k, date)\n udm_news.append(content)\n return udm_news\n\n","repo_name":"krastykovyaz/vk_wall_poster","sub_path":"parsing_news.py","file_name":"parsing_news.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"24386353106","text":"# coding=utf-8\n'''\n方法1:从nums内第三个数开始,插入到之前结果的缝隙中,组成新的排列,知道numds内数字全部插完为止\neg:前两个数全排列为[[|0|1|], [|1|0|]],第三个数可以在|位置插入,一共用六种组合】\n\n方法2:定义一个used数组,记录当前数字是否在item中,如果不在,插入到item中,若item长度等于nums,说明所有数字插入完毕,将\nitem加到结果res中,item弹出最后一个数字,继续遍历。这种方法会首先固定第一个数字,将后面的数字全排列,\n'''\n\n\ndef permute(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n if not nums:\n return []\n n = nums.__len__()\n if n == 1:\n return nums\n buf_ans = [[nums[0], nums[1]], [nums[1], nums[0]]]\n res = buf_ans\n index = 2\n for v in nums[2:]:\n res = []\n for buf in buf_ans:\n for i in range(index + 1):\n res.append(buf[:i] + [v] + buf[i:])\n index += 1\n buf_ans = res\n return res\n\n\ndef permute1(nums):\n def helper(num, used, item, res):\n if item.__len__() == num.__len__():\n res.append(item[:])\n return\n for i in range(num.__len__()):\n if not used[i]:\n used[i] = True\n item.append(num[i])\n helper(num, used, item, res)\n item.pop()\n used[i] = False\n\n if not nums:\n return nums\n res = []\n used = [False for _ in range(nums.__len__())]\n item = []\n helper(nums, used, item, res)\n return res\n\ndef permute2(nums):\n import itertools\n return [i for i in itertools.permutations(nums)]\n\n\nprint(permute2([0, 1, 2]))\n","repo_name":"ChangXiaodong/Leetcode-solutions","sub_path":"4/46-Permutations.py","file_name":"46-Permutations.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
+{"seq_id":"26180219904","text":"from datetime import datetime, timedelta\n\nfrom flask import current_app as app\nfrom nessie.externals import canvas_data, redshift, s3\nfrom nessie.jobs.background_job import BackgroundJob, BackgroundJobError\nfrom nessie.lib import berkeley\nfrom nessie.lib.util import get_s3_canvas_daily_path\nimport pandas as pd\n\n\n\"\"\"Logic for generate canvas data catalog job.\"\"\"\n\n\nclass RefreshCanvasDataCatalog(BackgroundJob):\n\n def run(self):\n # Retrieve latest schema definitions from Canvas data API\n response = canvas_data.get_canvas_data_schema()\n external_schema = app.config['REDSHIFT_SCHEMA_CANVAS']\n redshift_iam_role = app.config['REDSHIFT_IAM_ROLE']\n canvas_schema = []\n\n # Parse and isolate table and column details\n for key, value in response['schema'].items():\n for column in value['columns']:\n # Not every column has description and length.\n description = None\n if 'description' in column:\n description = column['description']\n\n length = None\n if 'length' in column:\n length = column['length']\n\n canvas_schema.append([\n value['tableName'],\n column['name'],\n column['type'],\n description,\n length,\n ])\n # Create a dataframe\n schema_df = pd.DataFrame(canvas_schema)\n schema_df.columns = [\n 'table_name',\n 'column_name',\n 'column_type',\n 'column_description',\n 'column_length',\n ]\n\n # The schema definitions received from Canvas are Redshift compliant. We update\n # cetain column types to match Glue and Spectrum data types.\n schema_df['glue_type'] = schema_df['column_type'].replace({\n 'enum': 'varchar',\n 'guid': 'varchar',\n 'text': 'varchar(max)',\n 'date': 'timestamp',\n 'datetime': 'timestamp',\n })\n\n schema_df['transformed_column_name'] = schema_df['column_name'].replace({\n 'default': '\"default\"',\n 'percent': '\"percent\"',\n })\n # Create Hive compliant storage descriptors\n canvas_external_catalog_ddl = self.generate_external_catalog(external_schema, schema_df)\n\n # Clean up and recreate refreshed tables on Glue using Spectrum\n redshift.drop_external_schema(external_schema)\n redshift.create_external_schema(external_schema, redshift_iam_role)\n\n if redshift.execute_ddl_script(canvas_external_catalog_ddl):\n app.logger.info('Canvas schema creation job completed.')\n else:\n app.logger.error('Canvas schema creation job failed.')\n raise BackgroundJobError('Canvas schema creation job failed.')\n\n self.verify_external_data_catalog()\n return 'Canvas external schema created and verified.'\n\n def generate_external_catalog(self, external_schema, schema_df):\n canvas_path = self.generate_canvas_path()\n canvas_tables = schema_df.table_name.unique()\n s3_canvas_data_url = 's3://' + app.config['LOCH_S3_BUCKET'] + '/' + canvas_path\n s3_requests_url = 's3://{}/{}'.format(app.config['LOCH_S3_BUCKET'], berkeley.s3_canvas_data_path_current_term())\n external_table_ddl = ''\n\n for table in canvas_tables:\n table_columns = schema_df.loc[schema_df['table_name'] == table].reset_index()\n storage_descriptor_df = table_columns[['transformed_column_name', 'glue_type']]\n\n create_ddl = 'CREATE EXTERNAL TABLE {}.{}\\n(\\n'.format(external_schema, table)\n storage_descriptors = ''\n for index in storage_descriptor_df.index:\n storage_descriptors = '{} {} {}'.format(\n storage_descriptors,\n storage_descriptor_df['transformed_column_name'][index],\n storage_descriptor_df['glue_type'][index],\n )\n if (index != (len(storage_descriptor_df.index) - 1)):\n storage_descriptors = storage_descriptors + ',\\n'\n\n table_properties = 
'\\n) \\nROW FORMAT DELIMITED FIELDS \\nTERMINATED BY \\'\\t\\' \\nSTORED AS TEXTFILE'\n if (table != 'requests'):\n table_location = '\\nLOCATION \\'{}/{}\\''.format(s3_canvas_data_url, table)\n else:\n table_location = '\\nLOCATION \\'{}/{}\\''.format(s3_requests_url, table)\n\n external_table_ddl = '{}\\n{}{}{}{};\\n\\n'.format(\n external_table_ddl,\n create_ddl,\n storage_descriptors,\n table_properties,\n table_location,\n )\n\n # For debugging process, export to external_table_ddl to file to get a well formed SQL template for canvas-data\n return external_table_ddl\n\n # Gets an inventory of all the tables by tracking the S3 canvas-data daily location and run count verification to ensure migration was successful\n def verify_external_data_catalog(self):\n s3_client = s3.get_client()\n bucket = app.config['LOCH_S3_BUCKET']\n external_schema = app.config['REDSHIFT_SCHEMA_CANVAS']\n prefix = self.generate_canvas_path()\n app.logger.info(f'Daily path = {prefix}')\n directory_names = []\n s3_objects = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)\n for object_summary in s3_objects['Contents']:\n # parse table names from the S3 object URLs\n directory_names.append(object_summary['Key'].split('/')[3])\n\n # Get unique table names from S3 object list\n tables = sorted(list(set(directory_names)))\n # Ensure that all tables required by downstream jobs have data present in S3.\n required_tables = [\n 'assignment_dim',\n 'assignment_override_dim',\n 'assignment_override_user_rollup_fact',\n 'course_dim',\n 'course_score_fact',\n 'course_section_dim',\n 'enrollment_dim',\n 'enrollment_fact',\n 'enrollment_term_dim',\n 'pseudonym_dim',\n 'submission_dim',\n 'submission_fact',\n 'user_dim',\n ]\n for required_table in required_tables:\n if required_table not in tables:\n raise BackgroundJobError(f'No data in S3 for external table {required_table}: aborting job.')\n\n app.logger.info(f'Tables to be verified : {tables}')\n for table in tables:\n result = redshift.fetch(f'SELECT COUNT(*) FROM {external_schema}.{table}')\n if result and result[0] and result[0]['count']:\n count = result[0]['count']\n app.logger.info(f'Verified external table {table} ({count} rows).')\n else:\n raise BackgroundJobError(f'Failed to verify external table {table}: aborting job.')\n app.logger.info(f'Canvas verification job completed successfully for {len(tables)} tables')\n return True\n\n def generate_canvas_path(self):\n canvas_path = get_s3_canvas_daily_path()\n if not s3.get_keys_with_prefix(canvas_path):\n canvas_path = get_s3_canvas_daily_path(datetime.now() - timedelta(days=1))\n if not s3.get_keys_with_prefix(canvas_path):\n raise BackgroundJobError('No timely Canvas data found, aborting')\n else:\n app.logger.info('Falling back to yesterday\\'s Canvas data')\n return canvas_path\n","repo_name":"ets-berkeley-edu/nessie","sub_path":"nessie/jobs/refresh_canvas_data_catalog.py","file_name":"refresh_canvas_data_catalog.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"77"}
+{"seq_id":"32765980130","text":"\n\ndef part1(file):\n total = 0\n with open(file) as f:\n while True:\n line = f.readline().strip()\n if not line:\n break\n left, right = line.split(\",\")\n ll, lr = left.split(\"-\")\n rl, rr = right.split(\"-\")\n if int(ll) >= int(rl) and int(lr) <= int(rr) or int(ll) <= int(rl) and int(lr) >= int(rr):\n total += 1\n\n return total\n\n\ndef part2(file):\n total = 0\n with open(file) as f:\n while True:\n line = f.readline().strip()\n if not line:\n break\n left, right = line.split(\",\")\n ll, lr = left.split(\"-\")\n rl, rr = right.split(\"-\")\n ll, lr, rl, rr = int(ll), int(lr), int(rl), int(rr)\n if rl <= lr <= rr \\\n or rl <= ll <= rr \\\n or ll >= rl and lr <= rr \\\n or ll <= rl and lr >= rr:\n total += 1\n\n return total\n\n\ndef main():\n print(\"the answer is \")\n print(part1(\"data/test_data\"))\n print(part1(\"data/real_data\"))\n print(part2(\"data/test_data\"))\n print(part2(\"data/real_data\"))\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"od107/aoc22","sub_path":"day4/pairs.py","file_name":"pairs.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"11673071473","text":"import threading\nimport zmq\nimport time\nimport json\nimport sys\nimport os\nsys.path.append(\"..\")\n# from controller.block_chain import *\n\n\nfrom hashlib import sha256\nimport json\nimport time\n\n\nclass Block(object):\n\n def __init__(self, transactions, previous_hash):\n self.transactions = transactions\n # self.timestamp = timestamp\n self.previous_hash = previous_hash\n self.nonce = 0\n\n def compute_hash(self):\n # A function that return the hash of the block contents.\n block_string = str(self.transactions) + str(self.nonce)\n # block_string = json.dumps(self._dict_, sort_keys = True)\n return str(sha256(block_string.encode()).hexdigest())\n\n\nclass MessageThread(threading.Thread):\n\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n self.endpoint = \"tcp://127.0.0.1:5002\"\n\n def run(self): # 把要执行���代码写到run函数里面 线程在创建后会直接运行run函数\n context = zmq.Context()\n client = context.socket(zmq.SUB)\n client.connect(self.endpoint)\n client.setsockopt(zmq.SUBSCRIBE, b'') # Terminate early\n while True:\n print(\"5002 waiting...\")\n rep = client.recv_json()\n reply = json.loads(rep)\n print(\"5002 received: \", reply)\n block = Block(reply, previous_hash=0)\n hash_result, noce = self.proof_of_work(block=block)\n for key in reply.keys():\n reply[key]['noce'] = str(noce)\n reply[key]['hash_cur'] = str(hash_result)\n MessageThread.send_finish_status(reply)\n\n def proof_of_work(self, block):\n block.nonce = 0\n computed_hash = block.compute_hash()\n while not computed_hash.startswith('O' * 5):\n block.nonce += 1\n computed_hash = block.compute_hash()\n # print(computed_hash)\n if computed_hash.startswith('0' * 5):\n break\n print(' 最终结果:{}, 随机数:{}'.format(computed_hash, block.nonce))\n return computed_hash, block.nonce\n\n @staticmethod\n def send_finish_status(block_object):\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://127.0.0.1:5003\")\n block_dict = {}\n block_dict['finished'] = block_object\n block_dict['uid'] = LoginThread.uid\n block_string = json.dumps(block_dict)\n print(\"5003 send: \", block_string)\n socket.send_json(json.dumps(block_dict))\n socket.recv_string()\n socket.close()\n\n\nclass Client(threading.Thread):\n\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n self.endpoint = \"tcp://127.0.0.1:5004\"\n\n def run(self):\n context = zmq.Context()\n client = context.socket(zmq.SUB)\n client.connect(self.endpoint)\n client.setsockopt(zmq.SUBSCRIBE, b'')\n while True:\n data = client.recv_json()\n reply = json.loads(data)\n print(\"5004 received\", reply)\n for key in reply.keys():\n load_data = {}\n if os.path.exists('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt'):\n with open('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt', 'r', encoding='utf-8') as f:\n load_data = json.load(f)\n if key in load_data:\n continue\n else:\n load_data[key] = reply[key]\n print(\"write data: \", load_data[key])\n with open('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt', 'w', encoding=\"utf-8\") as f:\n f.write(json.dumps(load_data, indent=2, ensure_ascii=False))\n else:\n load_data[key] = reply[key]\n with open('.\\\\data\\\\block\\\\pid-' + str(reply[key]['pid']) + '.txt', 'w', encoding=\"utf-8\") as f:\n f.write(json.dumps(load_data, indent=2, ensure_ascii=False))\n 
context = zmq.Context()\n        socket = context.socket(zmq.REQ)\n        socket.connect(\"tcp://127.0.0.1:5001\")\n        block_dict = {'ok': 'true', 'uid': LoginThread.uid}\n        block_string = json.dumps(block_dict)\n        print(\"5001 send: \", block_string)\n        socket.send_json(json.dumps(block_dict))\n        socket.recv_string()\n        socket.close()\n\n\nclass LoginThread(threading.Thread):\n    uid = \"\"\n\n    def __init__(self, threadID, name, counter):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name\n        self.counter = counter\n        self.endpoint = \"tcp://127.0.0.1:5006\"\n\n    def login(self):\n        context = zmq.Context()\n        socket = context.socket(zmq.REQ)\n        socket.connect(\"tcp://127.0.0.1:5005\")\n        while True:\n            data_dict = {}\n            for filename in os.listdir(os.getcwd() + \"\\\\data\\\\block\"):\n                with open(\".\\\\data\\\\block\\\\\" + filename, 'r', encoding='utf-8') as f:\n                    load_data = json.load(f)\n                for key in load_data.keys():\n                    data_dict[load_data[key]['pid']] = load_data\n            if data_dict:\n                data_dict['uid'] = self.uid\n                socket.send_json(json.dumps(data_dict))\n            else:\n                socket.send_json(json.dumps({'nothing': \"true\"}))\n            rec = socket.recv_json()\n            print(\"my received: \", rec)\n            time.sleep(60)\n\n    def run(self):  # put the code to execute inside run(); the thread executes run() right after it is started\n        context = zmq.Context()\n        client = context.socket(zmq.SUB)\n        client.connect(self.endpoint)\n        client.setsockopt(zmq.SUBSCRIBE, b'')  # Terminate early\n        while True:\n            rep = client.recv_json()\n            reply = json.loads(rep)\n            print(\"5006 received: \", reply)\n            for key in reply.keys():\n                with open('.\\\\data\\\\block\\\\pid-' + key + '.txt', 'w', encoding=\"utf-8\") as f:\n                    f.write(json.dumps(reply[key], indent=2, ensure_ascii=False))\n        client.close()\n\n\nif __name__ == \"__main__\":\n    login = LoginThread(1, \"Thread-1\", 1)\n    LoginThread.uid = input(\"your uid\")\n    login_thread = threading.Thread(target=login.login)\n    login_thread.start()\n    login.start()\n    handler2 = MessageThread(3, \"Thread-3\", 1)\n    handler2.start()\n    handler3 = Client(4, \"Thread-4\", 1)\n    handler3.start()\n","repo_name":"kelekle/simple_block_chain","sub_path":"client/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"77"}
+{"seq_id":"70109434490","text":"# Modelo\n# Controllar los datos\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nclass Person: \n def __init__(self, name, email):\n self.name = name \n self.email = email \n\nclass Student(Person):\n def __init__(self, name, last_name, phone, email, type_help):\n Person.__init__(self, name, email)\n self.last_name = last_name\n self.phone = phone\n self.type_help = type_help \n\nclass Voluntary(Person):\n def __init__(self, name, type_help, email, phone, courses):\n Person.__init__(self, name, email)\n self.courses = courses\n self.phone = phone\n self.type_help = type_help\n\nclass Donation(Person):\n def __init__(self, name, email, donated_money, others_donated):\n Person.__init__(self, name, email)\n self.donated_money = donated_money\n self.others_donated = others_donated\n \nclass Model:\n FILE_STUDENTS = \"docs//students.csv\"\n FILE_VOLUNNTEERING = \"docs//volunteering.csv\"\n FILE_INSTITUTIONS = \"docs//institutions.csv\"\n FILE_DONATIONS = \"docs//donations.csv\"\n FILE_INFORMATION = \"docs//information.csv\"\n\n def __init__(self):\n self.df_students = pd.read_csv(self.FILE_STUDENTS, encoding=\"latin1\")\n self.df_volunnteering = pd.read_csv(self.FILE_VOLUNNTEERING, encoding=\"latin1\")\n self.df_donations = pd.read_csv(self.FILE_DONATIONS, encoding=\"latin1\")\n self.df_institutions = pd.read_csv(self.FILE_INSTITUTIONS, encoding=\"latin1\")\n self.df_information = pd.read_csv(self.FILE_INFORMATION, encoding=\"latin1\")\n \n # --> Métodos para agregar datos nuevos a las \"bases de datos\"\n def add_new_student(self, student : Student):\n new_registry = {\n \"nombre\" : student.name,\n \"apellido\" : student.last_name,\n \"correo\" : student.email,\n \"numero\" : student.phone,\n \"tipo_ayuda_buscada\" : student.type_help\n }\n\n self.df_students = self.df_students.append(new_registry, ignore_index=True)\n self.df_students.to_csv(self.FILE_STUDENTS, index=False)\n return \"Información guardada con éxito\"\n\n def add_new_voluntary(self, voluntary : Voluntary):\n new_registry = {\n \"nombre_voluntariado\" : voluntary.name,\n \"tipo_ayuda\" : voluntary.phone,\n \"correo\" : voluntary.email,\n \"numero_telefonico\" : voluntary.phone,\n \"cuantcursos\" : voluntary.courses\n }\n\n self.df_volunnteering = self.df_volunnteering.append(new_registry, ignore_index=True)\n self.df_volunnteering.to_csv(self.FILE_VOLUNNTEERING, index=False)\n return \"Información guardada con éxito\"\n\n def add_new_donnation(self, donation : Donation):\n new_registry = {\n \"name\" : donation.name,\n \"email\" : donation.email,\n \"donated_money\" : donation.donated_money,\n \"others_donated\" : donation.others_donated,\n }\n\n self.df_donations = self.df_donations.append(new_registry, ignore_index=True)\n self.df_donations.to_csv(self.FILE_DONATIONS, index=False)\n return \"Información guardada con éxito\"\n\n def add_count_visite(self):\n self.df_information[\"veces_estudiante_interaccion\"][0] += 1\n self.df_information.to_csv(self.FILE_INFORMATION, index=False)\n\n # --> Métodos para llevar mostrar información en específica\n def show_recomendation_complete(self, type_help):\n total_elements = self.df_institutions.shape[0]\n for i in range(total_elements):\n if self.df_institutions[\"tipobeca\"][i] == type_help or self.df_institutions[\"tipobeca\"][i] == \"Mixto\":\n print(f\"-> {self.df_institutions['nombre'][i]} - Descripcion:\\n{self.df_institutions['detalles'][i]}\\n\")\n\n def show_other_recomendations(self):\n total_elements = 
self.df_institutions.shape[0]\n        for i in range(total_elements):\n            if self.df_institutions[\"tipobeca\"][i] != \"Completa\" and self.df_institutions[\"tipobeca\"][i] != \"Parcial\":\n                print(f\"-> {self.df_institutions['nombre'][i]} - Descripcion:\\n{self.df_institutions['detalles'][i]}\\n\")\n\n    # --> Methods to carry out the data analysis\n    def get_information_general(self):\n        info = f\"\"\"\n        Estudiantes que ingresaron a la pagina: {self.df_information[\"veces_estudiante_interaccion\"][0]}\n        Cantidad de estudiantes registrados: {self.df_students.shape[0]} \n        \"\"\"\n        return info\n\n    def get_donations_recived(self):\n        info = f\"\" \n        \n        money = 0\n        elements = self.df_donations.shape[0]\n        # Show total donated money\n        for i in range(elements): money += int(self.df_donations[\"donated_money\"][i])\n        info += f\"\\nDinero donado en total: {money}\\n\"\n\n        # Show donated items\n        info += f\"\\nElementos donados:\\n\"\n        for i in range(elements): \n            donated_elems = self.df_donations[\"others_donated\"][i].split(\"+\")\n            for elem in donated_elems: \n                if not len(elem) == 0:\n                    info += f\"-> {elem}\\n\"\n\n        return info\n\n    def show_grafic(self, type):\n        prueba = {\"Cantidad que entraron a la página\" : self.df_information[\"veces_estudiante_interaccion\"][0],\n                  \"Estudiantes registrados\" : self.df_students.shape[0]} if type == 1 else {\"Estudiantes\" : self.df_students.shape[0],\n                                                                                           \"Voluntareados\" : self.df_volunnteering.shape[0],\n                                                                                           \"Instituciones\" : self.df_institutions.shape[0]}\n\n        dates = list(prueba.keys())\n        values = list(prueba.values())\n\n        plt.bar(dates, values, color = \"red\", width = 0.4)\n\n        plt.title(\"Estudiantes ingresados\")\n        plt.ylabel(\"Cantidad Control\")\n        plt.xlabel(\"Tipo de interacción\")\n        plt.show() \n","repo_name":"CrisLayB/ProyectoHIODS","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"15703393562","text":"import numpy as np\n\nn = 2\nN = 10\n\nxes = np.random.random_sample((N, n)) # N массивов по n в длину\nyes = np.random.random_sample((N,)) # Один длинный массив длины N\n\n\nprint(xes)\nprint(\"xes\")\nprint(yes)\nprint(\"yes\")\ny_mean = np.sum(yes) / N # средний от всех y \n\n# x1_mean = np.sum(xes.transpose()[0]) / N\n# x2_mean = np.sum(xes.transpose()[1]) / N\n\n# 1 относится к тому, что массив получится одномерный\nx_1_means = np.array(\n [sum(xes.transpose()[i]) / N for i in range(n)]) # считаем среднее от всех координат x по отдельности. \n\nxy_1_means = np.array(\n [np.sum(yes * xes.transpose()[i_s]) / N for i_s in range(n)]) # \n\n# 2 относится к тому, что массив получится двумерный. Получается матричка размера n на n.\nx_2_means = np.array([np.array([\n np.sum(xes.transpose()[i] * xes.transpose()[i_s]) / N\n for i in range(n)]) for i_s in range(n)]) # \n\n\ndef matrix_function(i, i_s):\n if i_s == 0:\n # мы попали в самое первое уравнение\n if i == 0:\n return 1\n else:\n return x_1_means[i - 1]\n else:\n # мы попали в более сложный случай\n if i == 0:\n return x_1_means[i_s - 1]\n else:\n return x_2_means[i_s - 1][i - 1]\n\n\ndef right_part_function(i_s):\n if i_s == 0:\n return y_mean\n else:\n return xy_1_means[i_s - 1]\n\n# n+1 поскольку у нас 1ое уравнение есть и все остальные, которых n\n\n# для нахождения a_0 и a_i нужно будет обращать main_matrix\nmain_matrix = np.array([np.array([matrix_function(i, i_s) for i in range(n + 1)]) for i_s in range(n + 1)])\n# напишем же правую часть уравнения\nright_part = np.array([right_part_function(i_s) for i_s in range(n + 1)])\n# Это строчка вида [a_0, a_1, a_2, ...]\nour_result = np.linalg.solve(main_matrix, right_part)\nprint(our_result)","repo_name":"AlexandraKalinina/vkr","sub_path":"liner.py","file_name":"liner.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"33427935330","text":"import os\nimport sys\nsys.path.append(\".\")\n\nimport torch\nfrom torch.utils.data import DataLoader, random_split\n\nfrom ImageAestheticsGANs.AADB.AADB import AADB_binaries\nfrom tqdm import tqdm\nfrom ImageAestheticsGANs.models.ResNet18 import RegressionNetwork\nimport torch.nn as nn\nfrom ImageAestheticsGANs.loss_functions.focal_loss import FocalLoss\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Arguments for training loop\")\nparser.add_argument('--batch_size', type=int, help=\"Number of batches\")\nparser.add_argument('--epochs', type=int, default=200, help=\"Number of epochs\")\nparser.add_argument('--image_size', type=int,default=64, help=\"Image dimensions\")\nparser.add_argument('--load', type=bool, default=False, help=\"Loading model?\")\nparser.add_argument('--lr', type=float, default=0.0002, help=\"Learning rate\")\nparser.add_argument('--ckpt', type=str, help=\"Checkpoint for loading\")\nparser.add_argument('--beta', type=float, default=0.5, help=\"Beta for Adam optimizer\")\nparser.add_argument('--optim', type=str, default='sgd', help=\"Optimizer for the algorithm (adam/sgd)\")\nparser.add_argument('--criterion', type=str, default='bcelogits', help=\"Loss function (cross/bcelogits/focal)\")\nparser.add_argument('--results', type=str, help=\"Results folder\")\nargs = parser.parse_args()\n\nbatch_size = args.batch_size\nepochs = args.epochs\nload = args.load\nckpt = args.ckpt\nlr = args.lr\nbeta = args.beta\nimage_size = args.image_size\n\ndata_path = 'F:\\Projects\\Disertatie\\ImageAestheticsGANs\\AADB'\n\naadb = AADB_binaries(data_path, image_size)\naadb_test = AADB_binaries(data_path, image_size, test=True)\nn_classes = aadb.get_num_classes()\n\nval_size = 500\ntrain_size = len(aadb) - val_size\n\ntrain_ds, val_ds = random_split(aadb, [train_size, val_size])\ntrain_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\nvalid_dl = DataLoader(val_ds, batch_size, num_workers=0, pin_memory=True)\n\n\ndef get_default_device():\n '''Pick GPU if available'''\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')\n\n\ndef to_device(data, device):\n '''Move tensors to chosen device'''\n if isinstance(data, (list, tuple)):\n return [to_device(x, device) for x in data]\n return data.to(device, non_blocking=True).to(torch.float32)\n\n\nclass DeviceDataLoader():\n def __init__(self, dl, device):\n self.dl = dl\n self.device = device\n\n def __iter__(self):\n for b in self.dl:\n yield to_device(b, self.device)\n\n def __len__(self):\n return len(self.dl)\n\n\ndevice = get_default_device()\ntrain_dl = DeviceDataLoader(train_dl, device)\nvalid_dl = DeviceDataLoader(valid_dl, device)\n\nif args.criterion == \"bcelogits\":\n criterion = nn.BCEWithLogitsLoss()\nelif args.criterion == \"focal\":\n criterion = FocalLoss()\nelif args.criterion == \"cross\":\n criterion = nn.CrossEntropyLoss()\n\nmodel = RegressionNetwork(backbone='resnet18', num_attributes=n_classes, pretrained=True)\nmodel = model.to('cuda')\n\nif args.optim == 'adam':\n opt = torch.optim.Adam(model.parameters(), lr=lr, betas=(beta, 0.999))\nelif args.optim == 'sgd':\n opt = torch.optim.SGD(params=model.parameters(), lr=lr, momentum=0.9)\n\nif load:\n print(\"Loading checkpoint...\")\n\n checkpoint = torch.load(ckpt)\n last_epoch = checkpoint['epoch'] + 1\n\n train_losses = checkpoint['train_losses']\n val_losses = checkpoint['val_losses']\n\n loss = train_losses[-1]\n\n 
model.load_state_dict(checkpoint['model'])\n opt.load_state_dict(checkpoint['optimizer'])\n model.eval()\n\nelse:\n last_epoch = 0\n\n train_losses = []\n val_losses = []\n\nfor epoch in range(last_epoch, epochs):\n\n # Training Phase\n model.train()\n\n pbar = tqdm(enumerate(train_dl), total=len(train_dl))\n for batch, (images, labels) in pbar:\n opt.zero_grad()\n\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n\n # predicted = outputs.detach() > 0.5\n\n # correct = (predicted == labels.type(torch.uint8))\n\n # accuracy = correct.sum().item() / (len(correct) * n_classes)\n\n opt.step()\n\n # pbar.set_description(\"Epoch {}, Loss: {:.4f}, Accuracy: {:.4f}\".format(\n # epoch, float(loss), float(accuracy)))\n pbar.set_description(\"Epoch {}, Loss: {:.4f}\".format(\n epoch, float(loss)))\n train_losses.append(loss)\n\n # Evaluation Phase\n model.eval()\n correct = 0\n total = 0\n pbar = tqdm(enumerate(valid_dl), total=len(valid_dl))\n for batch, (images, labels) in pbar:\n with torch.no_grad():\n outputs = model(images)\n val_loss = loss = criterion(outputs, labels)\n\n predicted = outputs > 0.5\n\n correct += (predicted == labels.type(torch.uint8)).sum().item()\n total += len(labels) * n_classes\n\n accuracy = correct / total\n val_losses.append(val_loss)\n print('Accuracy of all test images: %.3f' % (accuracy * 100))\n if epoch % 10 == 0:\n filename = \"{}_epoch_{}_accuracy_{:.4f}_.pt\".format('AADB', epoch, accuracy)\n torch.save({'epoch': epoch,\n 'model': model.state_dict(),\n 'optimizer': opt.state_dict(),\n 'train_losses': train_losses,\n 'val_losses': val_losses\n }, os.path.join(args.results, filename))\n load = False\n","repo_name":"PetreBogdan/ImageAestheticGANs","sub_path":"ImageAestheticsGANs/resnet18_train.py","file_name":"resnet18_train.py","file_ext":"py","file_size_in_byte":5430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"35067457979","text":"import inspect\nimport tempfile\nimport os.path\nimport urllib.request\nimport textwrap\nfrom collections import namedtuple\nimport random\nimport numpy as np\nimport uuid\nfrom typing import List\nimport yaml\nimport re\nimport os\nimport sys\n\nSPLITS = ('train', 'val', 'test')\n\n\nclass VergeMLError(Exception):\n\n def __init__(self, message, suggestion=None, help_topic=None, hint_type=None, hint_key=None):\n super().__init__(message)\n self.suggestion = suggestion\n self.message = message\n self.hint_type = hint_type\n self.hint_key = hint_key\n self.help_topic = help_topic\n \n def __str__(self):\n if self.suggestion:\n if len(self.message + self.suggestion) < 80:\n return self.message + \" \" + self.suggestion\n else:\n return self.message + \"\\n\" + self.suggestion\n else:\n return self.message\n\n\ndef wrap_text(text):\n # TODO check terminal width\n res = []\n for para in text.split(\"\\n\\n\"):\n if para.splitlines()[0].strip().endswith(\":\"):\n res.append(para)\n else:\n res.append(textwrap.fill(para, drop_whitespace=True, fix_sentence_endings=True))\n return \"\\n\\n\".join(res)\n\n\ndef print_text(text):\n print(wrap_text(text))\n\n\n_Intro = namedtuple('_Intro', ['args', 'defaults', 'types'])\n\n\ndef introspect(call):\n spec = inspect.getfullargspec(call)\n args = spec.args\n defaults = dict(zip(reversed(spec.args), reversed(spec.defaults or [])))\n types = spec.annotations\n return _Intro(args, defaults, types)\n\n\n# taken from here: https://www.python-course.eu/levenshtein_distance.php\ndef _iterative_levenshtein(s, t):\n \"\"\" \n iterative_levenshtein(s, t) -> ldist\n ldist is the Levenshtein distance between the strings \n s and t.\n For all i and j, dist[i,j] will contain the Levenshtein \n distance between the first i characters of s and the \n first j characters of t\n \"\"\"\n rows = len(s)+1\n cols = len(t)+1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings \n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n \n for col in range(1, cols):\n for row in range(1, rows):\n if s[row-1] == t[col-1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row-1][col] + 1, # deletion\n dist[row][col-1] + 1, # insertion\n dist[row-1][col-1] + cost) # substitution\n \n return dist[row][col]\n\ndef did_you_mean(candidates, value, fmt=\"'{}'\"):\n candidates = list(candidates)\n names = list(sorted(map(lambda n: (_iterative_levenshtein(value, n), n), candidates)))\n names = list(filter(lambda dn: dn[0] <= 2, names))\n return 'Did you mean ' + fmt.format(names[0][1]) + '?' 
if names else None\n\n\ndef dict_set_path(d, path, value):\n c = d\n path = path.split(\".\")\n for key in path[:-1]:\n c = c.setdefault( key, {} )\n c[path[-1]] = value\n\ndef dict_del_path(d, path):\n if isinstance(path, str):\n path = path.split(\".\")\n if len(path) == 1:\n del[d[path[0]]]\n else:\n p, *rest = path\n dict_del_path(d[p], rest)\n if not d[p]:\n del d[p]\n\ndef dict_has_path(d, path):\n c = d\n for p in path.split(\".\"):\n if isinstance(c, dict) and p in c:\n c = c[p]\n else:\n return False\n return True\n\n_DEFAULT = object()\ndef dict_get_path(d, path, default=_DEFAULT):\n c = d\n for p in path.split(\".\"):\n if p in c:\n c = c[p]\n elif default != _DEFAULT:\n return default\n else:\n raise KeyError(path)\n return c\n\ndef dict_merge(dict1, dict2):\n if not isinstance(dict1, dict) or not isinstance(dict2, dict):\n return dict2\n for k in dict2:\n if k in dict1:\n dict1[k] = dict_merge(dict1[k], dict2[k])\n else:\n dict1[k] = dict2[k]\n return dict1\n\ndef dict_paths(d, path=None):\n res = []\n if path:\n if not dict_has_path(d, path):\n return res\n value = dict_get_path(d, path)\n else:\n value = d\n if not isinstance(d, dict):\n return res\n def _collect_path(d, path):\n for k, v in d.items():\n npath = f\"{path}.{k}\" if path is not None else k\n if isinstance(v, dict):\n _collect_path(v, npath)\n else:\n res.append(npath)\n _collect_path(value, path)\n return res\n\n\ndef parse_ai_names(argv):\n names = []\n for part in argv:\n if re.match(\"^@[a-zA-Z0-9_-]+$\", part):\n names.append(part[1:])\n else:\n break\n rest = argv[len(names):]\n return names, rest\n\ndef parse_split(value):\n \"\"\"Decodes the split value.\n \n Returns a tuple (type, value) where type is either perc, num or dir set.\n \"\"\"\n assert isinstance(value, (int, str))\n\n if isinstance(value, int):\n return ('num', value)\n elif value.endswith(\"%\"):\n return ('perc', float(value.rstrip(\"%\").strip()))\n elif value.isdigit():\n return ('num', int(value))\n else:\n return ('dir', value)\n\ndef format_info_text(text, indent=0, width=70):\n text = text.strip(\"\\n\")\n res = []\n for line in text.splitlines():\n if line.startswith(\" \"):\n res.append(line)\n elif line.strip() == \"\":\n res.append(line)\n else:\n res.extend(textwrap.wrap(line, width=width-indent))\n if indent:\n indstr = str(' ' * indent)\n res = list(map(lambda l: indstr + l, res))\n return \"\\n\".join(res)\n","repo_name":"tspannhw/vergeml","sub_path":"vergeml/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"77"}
+{"seq_id":"35515066025","text":"\"\"\"\nThis example uses the light sensor on the CPB, located net to the picture of the eye on the board.\nOnce you have the library loaded, try shining a flashlight on your CP to watch the number of\nNeoPixels lit up increase, or try covering up the light sensor to watch the number decrease.\n\"\"\"\nimport time\nfrom adafruit_circuitplayground import cp\n\ncp.pixels.auto_write = False\ncp.pixels.brightness = 0.3\n\n\ndef scale_range(value):\n \"\"\"Scale a value from 0-320 (light range) to 0-9 (NeoPixel range, 10 total LEDs).\n Allows remapping light value to pixel position for light meter demo.\"\"\"\n return round(value / 320 * 10)\n\n\nwhile True:\n # light value remapped to pixel position\n peak = scale_range(cp.light)\n print(cp.light)\n print(int(peak))\n\n for i in range(0, 10, 1):\n if i <= peak:\n cp.pixels[i] = (0, 255, 255)\n else:\n cp.pixels[i] = (0, 0, 0)\n cp.pixels.show()\n time.sleep(0.05)\n","repo_name":"kattni/PyCon2023","sub_path":"Circuit_Playground_Bluefruit/Circuit_Playground_Bluefruit_cp_Library_Examples/cp_light_neopixels.py","file_name":"cp_light_neopixels.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"77"}
+{"seq_id":"43052922104","text":"import sys\nimport time\nfrom mongoengine import *\nimport datetime as dt\nimport json\nfrom flask_cors import CORS\nfrom flask_restful import Resource, Api, reqparse\nfrom flask import Flask\nsys.path.append(\"..\")\n\nimport utils.credentials\nfrom database.Request import Request as dbRequest\nfrom database.Station import Station as dbStation\nfrom database.Proposition import Proposition as dbProposition\nfrom database.TrainRecord import TrainRecord as dbTrainRecord\n\n\napp = Flask(__name__)\napi = Api(app)\nCORS(app)\n\n\n# Requests\n# shows a list of requests and lets you POST to add new requests in the database\nclass Requests(Resource):\n\n @staticmethod\n def get():\n \"\"\"\n Get the list of all the requests registered in the database.\n :return: A list of JSON each containing a request.\n \"\"\"\n start = time.time()\n db_requests = dbRequest.objects\n requests = json.loads(db_requests.to_json())\n for k in range(len(requests)):\n requests[k]['date'] = str((db_requests[k]['date']))\n requests[k]['destination'] = db_requests[k]['destination'].name\n requests[k]['origin'] = db_requests[k]['origin'].name\n end = time.time()\n print(\"GET /requests took \"+str(end-start)+\" s\")\n return requests, 200\n\n @staticmethod\n def post():\n \"\"\"\n Add a NEW request that will be registered in the database.\n :param: Arguments of the POST request.\n origin: Code of the origin station (i.e. FRADI) - Required\n destination: Code of the destination station (i.e. FRAFJ) - Required\n date: Date of the first request with the format %Y-%m-%d %H:%M:%S - Required\n gapTime: Number of days you want to execute the request - Default is 0\n :return: A JSON file of the request newly registered.\n \"\"\"\n\n # requests parser\n requests_parser = reqparse.RequestParser()\n requests_parser.add_argument(\n name='origin', type=str, required=True, help=\"The code of the origin station\")\n requests_parser.add_argument(name='destination', type=str, required=True,\n help=\"The code of the destination station\")\n requests_parser.add_argument(name='date', type=str, required=True,\n help=\"The date of the first request; format : '%Y-%m-%d %H:%M:%S'\")\n requests_parser.add_argument(name='gapTime', default=0, type=int,\n help=\"The number of days you want to execute the request; 0 just for once\")\n requests_args = requests_parser.parse_args()\n\n # gets the corresponding Station object\n origin_station = dbStation().get_station_by_code(\n requests_args['origin'])\n destination_station = dbStation().get_station_by_code(\n requests_args['destination'])\n\n if requests_args['gapTime'] == 0:\n unique_date = True\n else:\n unique_date = False\n\n # checks if the request already exists in the database\n request_exist = dbRequest.objects(origin=origin_station, destination=destination_station,\n uniqueDate=unique_date,\n date=dt.datetime.strptime(\n requests_args['date'], \"%Y-%m-%d %H:%M:%S\"),\n gapTime=requests_args['gapTime']).first() is not None\n\n if request_exist:\n request_id = dbRequest.objects(origin=origin_station,\n destination=destination_station,\n uniqueDate=unique_date,\n date=dt.datetime.strptime(\n requests_args['date'], \"%Y-%m-%d %H:%M:%S\"),\n gapTime=requests_args['gapTime']).first().id\n return \"The request already exists at id {}\".format(request_id), 208\n else:\n request = dbRequest(origin=origin_station,\n destination=destination_station,\n uniqueDate=unique_date,\n date=dt.datetime.strptime(\n requests_args['date'], \"%Y-%m-%d %H:%M:%S\"),\n 
gapTime=requests_args['gapTime'])\n request.save()\n\n request = json.loads(request.to_json())\n request['date'] = requests_args['date']\n request['destination'] = destination_station.name\n request['origin'] = origin_station.name\n return request, 201\n\n\napi.add_resource(Requests, '/requests')\n\n\n# Request\n# shows a single request item and lets you PUT or DELETE a request item in the database\nclass Request(Resource):\n\n @staticmethod\n def get(request_id):\n \"\"\"\n Get a single request registered in the database.\n :param: request_id: Id of the request to get\n :return: A JSON file of the request.\n \"\"\"\n if dbRequest.objects(id=request_id).first() is not None:\n db_request = dbRequest.objects(id=request_id).first()\n request = json.loads(db_request.to_json())\n request['date'] = str((db_request['date']))\n request['destination'] = db_request['destination'].name\n request['origin'] = db_request['origin'].name\n return request, 200\n else:\n return \"Request not found at this id {}\".format(request_id), 404\n\n @staticmethod\n def delete(request_id):\n \"\"\"\n Delete from the database a single request registered in the database.\n :param: request_id: Id of the request to delete\n :return: 204.\n \"\"\"\n dbRequest.objects(id=request_id).delete()\n return \"\", 204\n\n @staticmethod\n def put(request_id):\n \"\"\"\n Update a request that is registered in the database.\n :param: Arguments of the PUT request.\n origin: Code of the origin station (i.e. FRADI) - Default is the request's one\n destination: Code of the destination station (i.e. FRAFJ) - Default is the request's one\n date: Date of the first request with the format %Y-%m-%d %H:%M:%S - Default is the request's one\n gapTime: Number of days you want to execute the request - Default is the request's one\n :return: A JSON file of the request newly updated.\n \"\"\"\n\n # request parser\n request_parser = reqparse.RequestParser()\n request_parser.add_argument(\n name='origin', type=str, help=\"The code of the origin station\")\n request_parser.add_argument(\n name='destination', type=str, help=\"The code of the destination station\")\n request_parser.add_argument(name='date', type=str,\n help=\"The date of the first request; format : '%Y-%m-%d %H:%M:%S'\")\n request_parser.add_argument(name='gapTime', type=int,\n help=\"The number of days you want to execute the request; 0 just for once\")\n request_args = request_parser.parse_args()\n\n request = dbRequest.objects(id=request_id).first()\n\n # get default values\n if request_args['origin'] is None:\n request_args['origin'] = request.origin.code\n if request_args['destination'] is None:\n request_args['destination'] = request.destination.code\n if request_args['date'] is None:\n request_args['date'] = dt.datetime.strftime(\n request.date, \"%Y-%m-%d %H:%M:%S\")\n if request_args['gapTime'] is None:\n request_args['gapTime'] = request.gapTime\n\n if request_args['gapTime'] == 0:\n unique_date = True\n else:\n unique_date = False\n\n origin_station = dbStation().get_station_by_code(\n request_args['origin'])\n destination_station = dbStation().get_station_by_code(\n request_args['destination'])\n\n dbRequest.objects(id=request_id).update_one(set__origin=origin_station,\n set__destination=destination_station, set__uniqueDate=unique_date,\n set__date=dt.datetime.strptime(request_args['date'],\n \"%Y-%m-%d %H:%M:%S\"),\n set__gapTime=request_args['gapTime'])\n\n db_request = dbRequest.objects(id=request_id).first()\n request = json.loads(db_request.to_json())\n request['date'] = 
str((db_request['date']))\n        request['destination'] = db_request['destination'].name\n        request['origin'] = db_request['origin'].name\n        return request, 200\n\n\napi.add_resource(Request, '/requests/<request_id>')\n\n\n# Stations\n# shows a list of stations and lets you POST to add new stations in the database\nclass Stations(Resource):\n    @staticmethod\n    def get():\n        \"\"\"\n        Get the list of all the stations registered in the database.\n        :return: A list of JSON each containing a station.\n        \"\"\"\n\n        # stations parser\n        stations_parser = reqparse.RequestParser()\n        stations_parser.add_argument(\n            name='name', type=str, help=\"The name of the station\")\n        stations_args = stations_parser.parse_args()\n\n        start = time.time()\n        if stations_args['name'] is not None:\n            stations = json.loads(dbStation.search_station(stations_args['name']).order_by(\"name\").to_json())\n        else:\n            stations = json.loads(dbStation.objects.order_by(\"name\").to_json())\n        end = time.time()\n        print(\"GET /stations took \"+str(end-start)+\" s\")\n        return stations, 200\n\n    @staticmethod\n    def post():\n        \"\"\"\n        Add a NEW station that will be registered in the database.\n        :param: Arguments of the POST request.\n                code: Code of the station (i.e. FRAFJ) - Required\n                name: Name of the station - Required\n        :return: A JSON file of the station newly registered.\n        \"\"\"\n\n        # stations parser\n        stations_parser = reqparse.RequestParser()\n        stations_parser.add_argument(\n            name='code', type=str, required=True, help=\"The code of the station (i.e. FRAFJ)\")\n        stations_parser.add_argument(\n            name='name', type=str, required=True, help=\"The name of the station\")\n        stations_args = stations_parser.parse_args()\n\n        # checks if the station already exists in the database\n        station_exist = dbStation.objects(\n            code=stations_args['code'], name=stations_args['name']).first() is not None\n\n        if station_exist:\n            station_id = dbStation.objects(\n                code=stations_args['code'], name=stations_args['name']).first().id\n            return \"The station already exists at id {}\".format(station_id), 208\n        else:\n            station = dbStation(\n                code=stations_args['code'], name=stations_args['name'])\n            station.save()\n            return json.loads(station.to_json()), 201\n\n\napi.add_resource(Stations, '/stations')\n\n\n# Station\n# shows a single station item and lets you PUT or DELETE a station item in the database\nclass Station(Resource):\n\n    @staticmethod\n    def get(station_id):\n        \"\"\"\n        Get a single station registered in the database.\n        :param: station_id: Id of the station to get\n        :return: A JSON file of the station.\n        \"\"\"\n        if dbStation.objects(id=station_id).first() is not None:\n            return json.loads(dbStation.objects(id=station_id).first().to_json()), 200\n        else:\n            return \"Station not found at this id {}\".format(station_id), 404\n\n    @staticmethod\n    def delete(station_id):\n        \"\"\"\n        Delete from the database a single station registered in the database.\n        :param: station_id: Id of the station to delete\n        :return: 204.\n        \"\"\"\n        dbStation.objects(id=station_id).delete()\n        return \"\", 204\n\n    @staticmethod\n    def put(station_id):\n        \"\"\"\n        Update a station that is registered in the database.\n        :param: code: Code of the station (i.e. FRAFJ) - Default is the station's one\n                name: Name of the station - Default is the station's one\n        :return: A JSON file of the station newly updated.\n        \"\"\"\n\n        # station parser\n        station_parser = reqparse.RequestParser()\n        station_parser.add_argument(\n            name='code', type=str, help=\"The code of the station (i.e. 
FRAFJ)\")\n station_parser.add_argument(\n name='name', type=str, help=\"The name of the station\")\n station_args = station_parser.parse_args()\n\n station = dbStation.objects(id=station_id).first()\n\n # get default values\n if station_args['code'] is None:\n station_args['code'] = station.code\n if station_args['name'] is None:\n station_args['name'] = station.name\n\n dbStation.objects(id=station_id).update_one(\n set__code=station_args['code'], set__name=station_args['name'])\n return json.loads(dbStation.objects(id=station_id).first().to_json()), 200\n\n\napi.add_resource(Station, '/stations/')\n\n\n# Propositions\n# shows a list of propositions and lets you POST to add new propositions in the database\nclass Propositions(Resource):\n @staticmethod\n def get():\n \"\"\"\n Get the list of all the propositions registered in the database.\n :return: A list of JSON each containing a proposition.\n \"\"\"\n return json.loads(dbProposition.objects.to_json()), 200\n\n # @staticmethod\n # def post():\n # \"\"\"\n # Add a NEW proposition that will be registered in the database.\n # :param: Arguments of the POST request.\n # amount: Price of the proposition - Required\n # remainingSeat: Number of remaining seats for the proposition - Required\n # :return: A JSON file of the proposition newly registered.\n # \"\"\"\n\n # # propositions parser\n # propositions_parser = reqparse.RequestParser()\n # propositions_parser.add_argument(name='amount', type=int, required=True, help=\"The price of the proposition\")\n # propositions_parser.add_argument(name='remainingSeat', type=float, required=True,\n # help=\"The number of remaining seats for the proposition\")\n # propositions_args = propositions_parser.parse_args()\n\n # # checks if the proposition already exists in the database\n # proposition_exist = dbProposition.objects(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat']).first() is not None\n # print(proposition_exist, dbProposition.objects(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat']))\n # if proposition_exist:\n # proposition_id = dbProposition.objects(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat']).first().id\n # return \"The proposition already exists at id {}\".format(proposition_id), 208\n # else:\n # proposition = dbProposition(amount=propositions_args['amount'],\n # remainingSeat=propositions_args['remainingSeat'])\n # proposition.save()\n # return json.loads(proposition.to_json()), 201\n\n\napi.add_resource(Propositions, '/propositions')\n\n\n# Proposition\n# shows a single proposition item and lets you PUT or DELETE a proposition item in the database\nclass Proposition(Resource):\n\n @staticmethod\n def get(proposition_id):\n \"\"\"\n Get a single proposition registered in the database.\n :param: proposition_id: Id of the proposition to get\n :return: A JSON file of the proposition.\n \"\"\"\n if dbProposition.objects(id=proposition_id).first() is not None:\n return json.loads(dbProposition.objects(id=proposition_id).first().to_json()), 200\n else:\n return \"Proposition not found at this id {}\".format(proposition_id), 404\n\n @staticmethod\n def delete(proposition_id):\n \"\"\"\n Delete from the database a single proposition registered in the database.\n :param: proposition_id: Id of the proposition to delete\n :return: 204.\n \"\"\"\n dbProposition.objects(id=proposition_id).delete()\n return \"\", 204\n\n # @staticmethod\n # def put(proposition_id):\n # 
\"\"\"\n # Update a proposition that is registered in the database.\n # :param: amount: Price of the proposition - Default is the proposition's one\n # remainingSeat: Number of remaining seats for the proposition - Default is the proposition's one\n # :return: A JSON file of the proposition newly updated.\n # \"\"\"\n\n # # proposition parser\n # proposition_parser = reqparse.RequestParser()\n # proposition_parser.add_argument(name='amount', type=int, help=\"The price of the proposition\")\n # proposition_parser.add_argument(name='remainingSeat', type=float,\n # help=\"The number of remaining seats for the proposition\")\n # proposition_args = proposition_parser.parse_args()\n\n # proposition = dbProposition.objects(id=proposition_id).first()\n\n # # get default values\n # if proposition_args['amount'] is None:\n # proposition_args['amount'] = proposition.amount\n # if proposition_args['remainingSeat'] is None:\n # proposition_args['remainingSeat'] = proposition.remainingSeat\n\n # dbProposition.objects(id=proposition_id).update_one(set__amount=proposition_args['amount'],\n # set__remainingSeat=proposition_args['remainingSeat'])\n # return json.loads(dbProposition.objects(id=proposition_id).first().to_json()), 200\n\n\napi.add_resource(Proposition, '/propositions/')\n\n\n# TrainRecords\n# shows a list of train records in the database\nclass TrainRecords(Resource):\n @staticmethod\n def get():\n \"\"\"\n Get the list of all the train records registered in the database by page.\n :param: page: The page you want to get at, 0 to get all pages. Each page contains 3 train records. - Default is 1\n :return: A list of JSON each containing a train record.\n \"\"\"\n start = time.time()\n # trainrecords parser\n trainrecords_parser = reqparse.RequestParser()\n trainrecords_parser.add_argument(\n name='page', type=int, default=1, help=\"The page you want to get at, 0 to get all pages. Each page contains 3 train records. 
- Default is 1\")\n trainrecords_args = trainrecords_parser.parse_args()\n\n page_id = trainrecords_args['page']\n offset = 3\n \n if not(page_id):\n db_trainrecords = dbTrainRecord.objects.order_by('departureTime')\n else:\n db_trainrecords = dbTrainRecord.objects.order_by('departureTime')[(page_id-1)*offset:page_id*offset]\n trainrecords = json.loads(db_trainrecords.to_json())\n\n for k in range(len(trainrecords)):\n #step1 = time.time()\n #trainrecords[k]['recordedTime'] = str((db_trainrecords[k]['recordedTime']).isoformat())\n #step2 = time.time()\n #print(\"step 1 took {} s\".format(step2-step1))\n trainrecords[k]['arrivalTime'] = str(\n (db_trainrecords[k].arrivalTime.isoformat()))\n #step3 = time.time()\n #print(\"step 2 took {} s\".format(step3-step2))\n trainrecords[k]['departureTime'] = str(\n (db_trainrecords[k].departureTime.isoformat()))\n #step4 = time.time()\n #print(\"step 3 took {} s\".format(step4-step3))\n trainrecords[k]['propositions'] = []\n for db_propositions in db_trainrecords[k].propositions:\n content = {}\n for db_proposition in db_propositions.content:\n content[db_proposition.type] = {\n 'amount': db_proposition.amount, 'seats': db_proposition.remainingSeat}\n trainrecords[k]['propositions'].append(\n {'recordedTime': db_propositions.recordedTime.isoformat(), 'content': content})\n #step5 = time.time()\n #print(\"step 4 took {} s\".format(step5-step4))\n trainrecords[k]['destination'] = db_trainrecords[k].destination.name\n trainrecords[k]['origin'] = db_trainrecords[k].origin.name\n #print(\"One trainrecord took {} s\".format(step5-step1))\n\n end = time.time()\n print(\"GET /trainrecords took \"+str(end-start)+\" s\")\n return trainrecords, 200\n\n\napi.add_resource(TrainRecords, '/trainrecords')\n\n# class TrainRecordsPages(Resource):\n# @staticmethod\n# def get(page_id):\n# \"\"\"\n# Get the list of all the train records registered in the database by page.\n# :param: page_id: The page you want to get at, 0 to get all pages. Each page contains 3 train records. 
- Default is 1\n#         :return: A list of JSON each containing a train record.\n#         \"\"\"\n#         start = time.time()\n#         page_id=int(page_id)\n#         offset = 3\n#         if not(page_id):\n#             db_trainrecords = dbTrainRecord.objects.order_by('departureTime')\n#         else:\n#             db_trainrecords = dbTrainRecord.objects.order_by('departureTime')[(page_id-1)*offset:page_id*offset]\n#         trainrecords = json.loads(db_trainrecords.to_json())\n#         for k in range(len(trainrecords)):\n#             #step1 = time.time()\n#             #trainrecords[k]['recordedTime'] = str((db_trainrecords[k]['recordedTime']).isoformat())\n#             #step2 = time.time()\n#             #print(\"step 1 took {} s\".format(step2-step1))\n#             trainrecords[k]['arrivalTime'] = str(\n#                 (db_trainrecords[k].arrivalTime.isoformat()))\n#             #step3 = time.time()\n#             #print(\"step 2 took {} s\".format(step3-step2))\n#             trainrecords[k]['departureTime'] = str(\n#                 (db_trainrecords[k].departureTime.isoformat()))\n#             #step4 = time.time()\n#             #print(\"step 3 took {} s\".format(step4-step3))\n#             trainrecords[k]['propositions'] = []\n#             for db_propositions in db_trainrecords[k].propositions:\n#                 content = {}\n#                 for db_proposition in db_propositions.content:\n#                     content[db_proposition.type] = {\n#                         'amount': db_proposition.amount, 'seats': db_proposition.remainingSeat}\n#                 trainrecords[k]['propositions'].append(\n#                     {'recordedTime': db_propositions.recordedTime.isoformat(), 'content': content})\n#             #step5 = time.time()\n#             #print(\"step 4 took {} s\".format(step5-step4))\n#             trainrecords[k]['destination'] = db_trainrecords[k].destination.name\n#             trainrecords[k]['origin'] = db_trainrecords[k].origin.name\n#             #print(\"One trainrecord took {} s\".format(step5-step1))\n\n#         end = time.time()\n#         print(\"GET /trainrecords/pages/\"+str(page_id)+\" took \"+str(end-start)+\" s\")\n#         return trainrecords, 200\n\n\n# api.add_resource(TrainRecordsPages, '/trainrecords/pages/<page_id>')\n\n\n# TrainRecord\n# shows a single train record item and lets you DELETE a train record item in the database\nclass TrainRecord(Resource):\n\n    @staticmethod\n    def get(trainrecord_id):\n        \"\"\"\n        Get a single train record registered in the database.\n        :param: trainrecord_id: Id of the train record to get\n        :return: A JSON file of the train record.\n        \"\"\"\n        if dbTrainRecord.objects(id=trainrecord_id).first() is not None:\n            db_trainrecord = dbTrainRecord.objects(id=trainrecord_id).first()\n            trainrecord = json.loads(db_trainrecord.to_json())\n            #trainrecord['recordedTime'] = str((db_trainrecord['recordedTime']).isoformat())\n            trainrecord['arrivalTime'] = str(\n                (db_trainrecord['arrivalTime']).isoformat())\n            trainrecord['departureTime'] = str(\n                (db_trainrecord['departureTime']).isoformat())\n            trainrecord['propositions'] = []\n            for db_propositions in db_trainrecord.propositions:\n                content = {}\n                for db_proposition in db_propositions.content:\n                    content[db_proposition.type] = {\n                        'amount': db_proposition.amount, 'seats': db_proposition.remainingSeat}\n                trainrecord['propositions'].append(\n                    {'recordedTime': db_propositions.recordedTime.isoformat(), 'content': content})\n            trainrecord['origin'] = db_trainrecord['origin'].name\n            return trainrecord, 200\n        else:\n            return \"Train record not found at this id {}\".format(trainrecord_id), 404\n\n    @staticmethod\n    def delete(trainrecord_id):\n        \"\"\"\n        Delete from the database a single train record registered in the database.\n        :param: trainrecord_id: Id of the train record to delete\n        :return: 204.\n        \"\"\"\n        dbTrainRecord.objects(id=trainrecord_id).delete()\n        return \"\", 204\n\n\napi.add_resource(TrainRecord, '/trainrecords/<trainrecord_id>')\n\n\nif __name__ 
== '__main__':\n app.run(port='8080', debug=True)\n","repo_name":"Kornflex28/train-tracker","sub_path":"webserver/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":25924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"17201185563","text":"import numpy as np\n\ndef transform_to_text(pred_start, pred_end, text, offset, sentiment):\n\n def decode(pred_start, pred_end, text, offset):\n decoded_text = \"\"\n for i in range(pred_start, pred_end+1):\n decoded_text += text[offset[i][0]:offset[i][1]]\n if (i+1) < len(offset) and offset[i][1] < offset[i+1][0]:\n decoded_text += \" \"\n return decoded_text\n\n decoded_predictions = []\n for i in range(len(text)):\n # if sentiment[i] == \"neutral\" or len(text[i].split()) < 2:\n # decoded_text = text[i]\n # else:\n idx_start = np.argmax(pred_start[i])\n # idx_end = np.argmax(pred_end[i])\n candidates_end = np.argsort(pred_end[i])[::-1]\n\n j = 0\n while 1:\n idx_end = candidates_end[j]\n if idx_start <= idx_end:\n break\n j += 1\n\n decoded_text = str(decode(idx_start, idx_end, text[i], offset[i]))\n # if len(decoded_text) == 0:\n # decoded_text = text[i]\n decoded_predictions.append(decoded_text)\n\n return decoded_predictions\n\n\ndef compute_jaccard(selected_text, selected_text_pred):\n\n def jaccard(str1, str2):\n a = set(str1.lower().split())\n b = set(str2.lower().split())\n c = a.intersection(b)\n return float(len(c)) / (len(a) + len(b) - len(c))\n\n jaccard_mean = 0.\n for i in range(len(selected_text)):\n jaccard_mean += jaccard(selected_text[i], selected_text_pred[i])\n return jaccard_mean / len(selected_text)\n","repo_name":"akensert/kaggle-tweet-sentiment-extraction","sub_path":"src/tweet-sentiment-extraction/common/prediction_utils.py","file_name":"prediction_utils.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"3368654006","text":"\n# -*- coding: utf-8 -*-\nfrom locale import *\nimport sys,os\n\nproject_dir = '../tr/tr/'\n\nsys.path.append(project_dir)\nos.environ['DJANGO_SETTINGS_MODULE'] = 'settings'\nimport django\ndjango.setup()\n\nimport soundcloud\nfrom music.models import *\nfrom datetime import datetime, date, time\n\n\nclient = soundcloud.Client(client_id='dce5652caa1b66331903493735ddd64d')\npage_size = 200\ngenres_list = SoundGenres.objects.values('name')\ngenres_list_names = [name['name'] for name in genres_list]\n\nп_rus_list_1 = [\n\"П. Луспекаев\",\n\"Павел Артемьев И Ирина Тонева\",\n\"Павел Балтийский\",\n\"Павел Беккерман\",\n\"Павел Вишерский\",\n\"Павел Воробьёв\",\n\"Павел Данилов\",\n\"Павел Кашин\",\n\"Павел Козлов\",\n\"Павел Красношлык\",\n\"Павел Мах\",\n\"Павел Михайлов\",\n\"Павел Мурашов\",\n\"Павел Нарочанский\",\n\"Павел Павлецов\",\n\"Павел Пиковский и Группа Хьюго\",\n\"Павел Родни\",\n\"Павел Соколов\",\n\"Павел Фёдоров\",\n\"Павел Федоров (Paulo)\",\n\"Павел Филатов\",\n\"Павел Филатов & Настя\",\n\"Павел Филатов и группа Вне Зоны\",\n\"Павел Чумаков\",\n\"Павел Шевцов\",\n\"Павел Шубин и Андрей Якиманский\",\n\"Павла и Денис Ковальский\",\n\"Павлентий Чернов\",\n\"Павло Табаков\",\n\"Пальчики Оближешь\",\n\"Пан and Dino MC 47\",\n\"Панакота\",\n\"Панда feat. tompSON\",\n\"Паола\",\n\"Папины Дети\",\n\"Пара нормальных\",\n\"Пара Совпала\",\n\"Параллельные\",\n\"ПараТайн\",\n\"Парень Из Союза\",\n\"Пари\",\n\"Парк Удовольствий\",\n\"Паскаль\",\n\"ПатриотЪ\",\n\"Паук feat. Togga & Kvadrat\",\n\"Паулина Андреева feat. Баста\",\n\"Пацанка\",\n\"Пацаны\",\n\"Пачуля\",\n\"Паша Proorok\",\n\"Паша Вайти\",\n\"Паша Захарчук\",\n\"Паша Климат feat. Сюзанна Абдулла\",\n\"Паша Ли\",\n\"Паша Люмин и Даша Шувалова\",\n\"Паша Мос\",\n\"Паша Панамо\",\n\"Паша Руденко\",\n\"Паша Сли\",\n\"Паша Цветомузыка\",\n\"Паша Юдин\",\n\"Пающие Трусы\",\n\"Певица Афродита\",\n\"Пелих Ангелина\",\n\"Пепел Роза\",\n\"Первая Zаповедь & Савва Тихий\",\n\"Первая Zаповедь, Ahimas & Чак (M.Family)\",\n\"Первый Контакт\",\n\"Первый поворот\",\n\"Песняры\",\n\"Петкун, Голубев, Макарский\",\n\"Петлюра\",\n\"Петр Гара\",\n\"Пётр Дранга\",\n\"Петр Елфимов\",]\n\nп_rus_list_2 = [\n\"Петр Ильич Чайковский\",\n\"Пётр Казаков\",\n\"Петр Лещенко\",\n\"Петр Налич\",\n\"Петр Сергеев\",\n\"Петя Черный\",\n\"ПЗЖЕ feat. Рыбос\",\n\"Пиджаков\",\n\"Пикник\",\n\"Пилот\",\n\"Пиноккио\",\n\"Пионерский Хор Им. В.У. Попова\",\n\"Питер Пэн\",\n\"Пицца\",\n\"Пламя\",\n\"Планета 90\",\n\"Планка\",\n\"Пласти��а\",\n\"Платина\",\n\"Плохиш\",\n\"По Ту Сторону\",\n\"По Фрейду\",\n\"Под Одним Небом\",\n\"ПодZемка\",\n\"Подиум\",\n\"Поднимаем Руки Вверх\",\n\"Подпольная Траектория feat. Ahimas\",\n\"Подруги\",\n\"Подстрелов\",\n\"Подъём!\",\n\"Позитив and Напильник\",\n\"Покахонтас\",\n\"Полежаев\",\n\"Полиграф ШарикOFF\",\n\"Полина Богатикова\",\n\"Полина Богусевич\",\n\"Полина Буторина feat. 
DJ Groove\",\n\"Полина Гагарина\",\n\"Полина Гриффис\",\n\"Полина Зизак\",\n\"Полина Кузовкова (Pollykuu)\",\n\"Полина Ростова\",\n\"Полина Смолова\",\n\"Полина Сокольская\",\n\"Полина Соя\",\n\"Полнолуние\",\n\"Положительный Заряд\",\n\"Полтергейст\",\n\"Полумягкие\",\n\"Полюса\",\n\"Попанбэнд\",\n\"ПопКорн\",\n\"После 11\",\n\"После Вчерашнего\",\n\"Потапов Владимир\",\n\"Потемковский\",\n\"Потехин Бэнд\",\n\"Потехин, Трэк и Блюз\",\n\"Поющие вместе\",\n\"Поющие трусы\",\n\"Президент И Амазонка\",\n\"Премьер-Министр\",\n\"Приключения Мишек Гамми\",\n\"Приключения Спин И Марти\",\n\"Приключения Тигрули\",\n\"Приключения Флика\",\n\"Принцесса Авенью\",\n\"Принцесса И Лягушка\",\n\"Принцип (ZM)\",\n\"Провинция 42 feat. Bizaro\",\n\"Прогульщики\",\n\"Продавцы Новостей\",\n\"Проект Димac\",\n\"Проект Жить\",\n\"Проект Увечье\",]\n\nп_rus_list_3 = [\n\"Проект-22\",\n\"Прокофьев\",\n\"ПромЗона\",\n\"Пропаганда\",\n\"Пропорции\",\n\"Проспект 64\",\n\"Против Правил\",\n\"Профессор Лебединский\",\n\"Профилактика\",\n\"Профсоюзный Ансамбль Песни И Пляски\",\n\"Прохор Шаляпин\",\n\"Психо\",\n\"Птаха\",\n\"Пугачева Алла\",\n\"Пульсы\",\n\"Пуля\",\n\"Путевка В Жизнь\",\n\"Пушкашу & Випи\",\n\"Пыльца\",\n\"Пьер Нарцисс\",\n\"Пьера\",\n\"Пэссо\",\n\"Пятилетка\",\n\"Пятница 13-е\",\n]\n\nlitera = SoundSymbol.objects.get(name=\"П\")\n\ncount = 0\n\nfor tag in п_rus_list_1:\n tracks = client.get('/tracks', q=tag, limit=page_size, linked_partitioning=1)\n if tracks:\n for track in tracks.collection:\n created_at = track.created_at\n created_at = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')\n if track.description:\n description = track.description[:500]\n else:\n description=None\n try:\n Music.objects.get(id=track.id)\n except:\n if track.genre and track.release_year and track.duration > 90000 and track.genre in genres_list_names:\n try:\n self_tag = SoundTags.objects.get(name=tag, symbol=litera)\n except:\n self_tag = SoundTags.objects.create(name=tag, symbol=litera)\n genre =SoundGenres.objects.get(name=track.genre.replace(\"'\", '') )\n new_track = Music.objects.create(id=track.id, tag=self_tag, artwork_url=track.artwork_url, created_at=created_at, duration=track.duration, genre=genre, description=description, title=track.title, uri=track.uri, release_year=track.release_year)\n count = count + 1\n while tracks.next_href != None and count < 2000:\n tracks = client.get(tracks.next_href, limit=page_size, linked_partitioning=1)\n for track in tracks.collection:\n created_at = track.created_at\n created_at = datetime.strptime('Jun 1 2005 1:33PM', '%b %d %Y %I:%M%p')\n if track.description:\n description = track.description[:500]\n else:\n description=None\n try:\n Music.objects.get(id=track.id)\n except:\n if track.genre and track.release_year and track.duration > 90000 and track.genre in genres_list_names:\n try:\n self_tag = SoundTags.objects.get(name=tag, symbol=litera)\n except:\n self_tag = SoundTags.objects.create(name=tag, symbol=litera)\n genre =SoundGenres.objects.get(name=track.genre.replace(\"'\", '') )\n new_track = Music.objects.create(id=track.id, tag=self_tag, artwork_url=track.artwork_url, created_at=created_at, duration=track.duration, genre=genre, description=description, title=track.title, uri=track.uri, release_year=track.release_year)\n count = count + 
1\n","repo_name":"interesnij/django-social-network","sub_path":"common/parsing_soundcloud/rus/parsing_п_rus.py","file_name":"parsing_п_rus.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"19433842524","text":"from rest_framework.views import APIView, Request, Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import Movie\nfrom .serializers import MovieSerializer, MovieOrderSerializer\nfrom rest_framework.pagination import PageNumberPagination\n\n\nclass MoviesView(APIView, PageNumberPagination):\n def get(self, request: Request) -> Response:\n movies = Movie.objects.all().order_by(\"id\")\n\n result_page = self.paginate_queryset(movies, request, view=self)\n\n serializer = MovieSerializer(result_page, many=True)\n\n return self.get_paginated_response(serializer.data)\n\n def post(self, request: Request) -> Response:\n data = request.data\n\n serializer = MovieSerializer(data=data)\n\n if not serializer.is_valid():\n return Response(serializer.errors, 400)\n\n serializer.save(user=request.user)\n\n return Response(serializer.data, 201)\n\n\nclass MoviesIdView(APIView):\n def delete(self, request: Request, movie_id: int) -> Response:\n try:\n movie = Movie.objects.get(id=movie_id)\n movie.delete()\n return Response(status=204)\n\n except Movie.DoesNotExist:\n return Response({\"detail\": \"Not found\"}, 404)\n\n\nclass MovieOrderView(APIView):\n permission_classes = [IsAuthenticated]\n\n def post(self, request: Request, movie_id: int) -> Response:\n try:\n data = request.data\n movie = Movie.objects.get(pk=movie_id)\n serializer = MovieOrderSerializer(data=data)\n\n if not serializer.is_valid():\n return Response(serializer.errors, 400)\n\n serializer.save(buyed_by=request.user, movie=movie)\n\n return Response(serializer.data, 201)\n\n except Movie.DoesNotExist:\n return Response({\"detail\": \"Not found\"})\n","repo_name":"micaias-silva/kenzie-buster-micaias-silva","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"6002225984","text":"from collections import defaultdict\n\n\ndef first_recurrent(string):\n \"\"\"\n This problem was asked by Google.\n\n Given a string, return the first recurring character in it, or null if there is no recurring character.\n\n For example, given the string \"acbbac\", return \"b\". Given the string \"abcdef\", return null.\n :return:\n \"\"\"\n count = defaultdict(int)\n for char in string:\n if count[char]:\n return char\n else:\n count[char] += 1\n return \"Null\"\n\n\nif __name__ == \"__main__\":\n print(first_recurrent(\"acbbac\"))","repo_name":"Michael-Mbajwa/coding_challenges","sub_path":"first_recurrent.py","file_name":"first_recurrent.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"27862292979","text":"class Car:\n\n def __init__(self):\n self.plate_num = ''\n self.digit = 0\n self.date = 0\n self.day_week = 0\n self.name = ''\n self.hour = 0\n self.m = 0\n self.restriction_time = ''\n self.car_move = ''\n\n def get_plate_validation(self):\n import re\n valid_plate = False\n while not valid_plate:\n try:\n plate_let, self.plate_num = input(\"Plate number in the format AAA-1234: \").split('-')\n\n if plate_let == [] or self.plate_num == []:\n print(\"Incorrect format! empty\")\n elif 3 < len(plate_let) or len(plate_let) < 3 or not re.match(\"^[A-Z]*$\", plate_let):\n print(\"Incorrect format! three uppercase letters\")\n elif 4 < len(self.plate_num) or len(self.plate_num) < 3 or not re.match(\"^[0-9]*$\", self.plate_num):\n print(\"Incorrect format! three or four numbers\")\n else:\n valid_plate = True\n except ValueError:\n print(\"Incorrect format! AAA-1234\")\n\n self.digit = int(self.plate_num[-1])\n\n return self.digit\n\n def get_date_validation(self):\n import datetime\n valid_date = False\n while not valid_date:\n date_input = input(\"Date in the format dd/mm/yy: \")\n try:\n self.date = datetime.datetime.strptime(date_input, \"%d/%m/%y\")\n valid_date = True\n except ValueError:\n print(\"Incorrect format! dd/mm/yy\")\n\n self.day_week = self.date.weekday()\n day_names = {'Monday': 0, 'Tuesday': 1, 'Wednesday': 2, 'Thursday': 3, 'Friday': 4}\n list_items = day_names.items()\n for item in list_items:\n if item[1] == self.day_week:\n self.name = item[0]\n\n return self.day_week, self.name\n\n def get_time_validation(self):\n import datetime\n import time\n valid_time = False\n while not valid_time:\n time_input = input(\"Time in the format hour,min: \")\n try:\n time1 = time.strptime(time_input, '%H,%M')\n valid_time = True\n except ValueError:\n print(\"Incorrect format! 
hour,min\")\n\n time1 = time.strftime('%H:%M', time1)\n self.hour, self.m = time1.split(':')\n\n time_in = datetime.time(int(self.hour), int(self.m))\n if (datetime.time(7, 0) <= time_in <= datetime.time(9, 30)) or (\n datetime.time(16, 0) <= time_in <= datetime.time(19, 30)):\n self.restriction_time = 1\n else:\n self.restriction_time = 0\n\n return self.restriction_time\n\n def transit(self, digit_plate, day_week, restriction_time):\n\n if (digit_plate == 1 or digit_plate == 2) and day_week == 0: # Monday\n move = 0\n elif (digit_plate == 3 or digit_plate == 4) and day_week == 1: # Tuesday\n move = 0\n elif (digit_plate == 5 or digit_plate == 6) and day_week == 2: # Wednesday\n move = 0\n elif (digit_plate == 7 or digit_plate == 8) and day_week == 3: # Thursday\n move = 0\n elif (digit_plate == 9 or digit_plate == 0) and day_week == 4: # Friday\n move = 0\n else:\n move = 1\n\n if move == 0 and restriction_time == 1:\n self.car_move = \"cannot transit\"\n else:\n self.car_move = \"can transit\"\n\n return self.car_move\n\n def run_inspection(self):\n\n plate_dig = self.get_plate_validation()\n day_num, day_name = self.get_date_validation()\n time_restriction = self.get_time_validation()\n\n move_car = self.transit(plate_dig, day_num, time_restriction)\n\n print(\"Last Digit: \" + str(plate_dig) + \" in \" + day_name + \" at \" + str(self.hour) + \":\" + str(\n self.m) + \" ==> \" + str(move_car))\n\n\nif __name__ == '__main__':\n\n import datetime\n import time\n import re\n my_car = Car()\n\n print(\"PICO Y PLACA PREDICTOR\")\n\n while True:\n\n my_car.run_inspection()\n\n","repo_name":"siCaet/PicoyPlacaProject","sub_path":"car_restrictions.py","file_name":"car_restrictions.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"25967147636","text":"# 字母rot13,数字rot5\n\n# ROT5、13、18 解密\n\nimport string\n\n# 引入string的定义字符串\nascii_lowercase = string.ascii_lowercase # 小写字符串\nascii_uppercase = string.ascii_uppercase # 大写字符串\ndigits = string.digits\n\n# rot-18\n# ROT18:这是一个异类,本来没有,它是将ROT5和ROT13组合在一起,为了好称呼,将其命名为ROT18。\n\n# rot-5\n# ROT5:只对数字进行编码,用当前数字往前数的第5个数字替换当前数字,例如当前为0,编码后变成5,当前为1,编码后变成6,以此类推顺序循环。\ndigits_dict = {}\nfor i in range(len(digits)):\n digits_dict[digits[i]] = digits[i - 5]\n\n# rot-13\n# ROT13:只对字母进行编码,用当前字母往前数的第13个字母替换当前字母,例如当前为A,编码后变成N,当前为B,编码后变成O,以此类推顺序循环。\nlookup_dict = {}\n# 大写字符串填充\nfor i in range(len(ascii_uppercase)):\n lookup_dict[ascii_uppercase[i]] = ascii_uppercase[i - 13]\n# 小写字符串填充\nfor i in range(len(ascii_lowercase)):\n lookup_dict[ascii_lowercase[i]] = ascii_lowercase[i - 13]\n\n# 判断输入是否为数字、字母 后转换\n# 这里有一个很有意思的发现:中文被if判断为alpha\n\n\nimport string\n# a = string.ascii_letters + string.digits\ncipher = input(\"what's your cipher str:\")\n# cipher = a\nclear = ''\nfor i in cipher:\n if i.isdigit():\n a_digit = digits_dict[i]\n elif i.isalpha():\n a_digit = lookup_dict[i]\n else:\n a_digit = i\n clear += a_digit\n\nprint(clear)\n# print(a)\n\n","repo_name":"wgf4242/text","sub_path":"docs/ctf/scripts/Crypto_rot18.py","file_name":"Crypto_rot18.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"zh","doc_type":"code","stars":19,"dataset":"github-code","pt":"77"}
+{"seq_id":"72891666170","text":"import time\nfrom tkinter import *\nfrom models.Setting import Setting\nimport json\n\n\n# xử lý sự kiện nút\n\n\n\n# khởi tạo màn hình\ndef start():\n def vao_game():\n s = Setting(hang.get(), cot.get(), dkt.get())\n j = json.dumps(s.__dict__)\n # f = open(\"setting.json\", \"w\")\n # f.write(j)\n # f.close()\n root.setvar(\"setting\", j)\n root.destroy()\n return s\n root = Tk()\n root.geometry(\"250x250\")\n # tạo số mặc định\n hang = StringVar(root)\n cot = StringVar(root)\n # dkt = StringVar(root)\n hang.set(\"3\")\n cot.set(\"3\")\n # dkt.set(\"3\")\n # tên chính\n l = Label(root, text=\"Cài đặt chung\")\n # label cho chọn\n lb_chon_hang = Label(root, text=\"Chọn số hàng\")\n lb_chon_cot = Label(root, text=\"Chọn số cột\")\n lb_chon_win = Label(root, text=\"Chọn luật thắng\")\n # chọn số hàng và cột luật thắng\n chon_so_hang = Spinbox(root, from_=3, to=10, textvariable=hang)\n chon_so_cot = Spinbox(root, from_=3, to=10, textvariable=cot)\n # chon_so_win = Spinbox(root, from_=3, to=10, textvariable=dkt)\n\n # vào game\n b1 = Button(root, text=\"Vào game\", command=vao_game)\n\n # thoát game\n b2 = Button(root, text=\"Thoát\",\n command=root.destroy)\n\n # hiển thị trên màn hình\n l.pack()\n lb_chon_hang.pack()\n chon_so_hang.pack()\n lb_chon_cot.pack()\n chon_so_cot.pack()\n lb_chon_win.pack()\n # chon_so_win.pack()\n b1.pack()\n b2.pack()\n\n\n root.mainloop()\n return root\n","repo_name":"thien2001git/TTNT6","sub_path":"windows/Window1.py","file_name":"Window1.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"536670536","text":"#Projectile Coordinates\n\nimport math\n\ndef plot_coordinates():\n for i in range(1, 100):\n t = i * 0.1\n x = 20.0 * t * math.cos(math.radians(70))\n y = (20.0 * t * math.sin(math.radians(70))) - ((9.81 * (t**2))/2)\n print(round(x), round(y), sep=',')\n if y <= 0:\n break\n\nif __name__ == '__main__':\n plot_coordinates()","repo_name":"vinmen/learnpy","sub_path":"projectile.py","file_name":"projectile.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"37035480876","text":"# Florence has N integers, Her objective is to have N equal integers by transforming some of them.\n\n# She may transform each integer at most once. Transforming an integer X into another integer Y costs her (x-y)^2 pounds. Even if ai = aj, she has to pay the cost separately for transforming each of them (see Sample 2).\n\n# Find the minimum total cost to achieve her objective.\n\n# Input Format\n\n# Given standard input string as follows:\n\n# N\n# a1,a2,a3,...,an\n\n#30/30\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT\nN = int(input())\nlis = [eval(i) for i in input().split(\" \")]\n\navg = round (sum(lis)/len(lis))\nrt = 0\nfor i in range(len(lis)):\n rt += (avg-lis[i])**2\n \nprint(int(rt))\n\n","repo_name":"Arsenic-33/GUTSCodeHackathon","sub_path":"BTogether.py","file_name":"BTogether.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"19446383307","text":"from sys import platform\nfrom functools import wraps, partial\nfrom itertools import count\nfrom weakref import WeakValueDictionary\nfrom errno import errorcode\n\nfrom six import text_type as _text_type\nfrom six import integer_types as integer_types\n\nfrom OpenSSL._util import (\n ffi as _ffi,\n lib as _lib,\n exception_from_error_queue as _exception_from_error_queue,\n native as _native)\n\nfrom OpenSSL.crypto import (\n FILETYPE_PEM, _PassphraseHelper, PKey, X509Name, X509, X509Store)\n\n_unspecified = object()\n\ntry:\n _memoryview = memoryview\nexcept NameError:\n class _memoryview(object):\n pass\n\nOPENSSL_VERSION_NUMBER = _lib.OPENSSL_VERSION_NUMBER\nSSLEAY_VERSION = _lib.SSLEAY_VERSION\nSSLEAY_CFLAGS = _lib.SSLEAY_CFLAGS\nSSLEAY_PLATFORM = _lib.SSLEAY_PLATFORM\nSSLEAY_DIR = _lib.SSLEAY_DIR\nSSLEAY_BUILT_ON = _lib.SSLEAY_BUILT_ON\n\nSENT_SHUTDOWN = _lib.SSL_SENT_SHUTDOWN\nRECEIVED_SHUTDOWN = _lib.SSL_RECEIVED_SHUTDOWN\n\nSSLv2_METHOD = 1\nSSLv3_METHOD = 2\nSSLv23_METHOD = 3\nTLSv1_METHOD = 4\nTLSv1_1_METHOD = 5\nTLSv1_2_METHOD = 6\n\nOP_NO_SSLv2 = _lib.SSL_OP_NO_SSLv2\nOP_NO_SSLv3 = _lib.SSL_OP_NO_SSLv3\nOP_NO_TLSv1 = _lib.SSL_OP_NO_TLSv1\n\nOP_NO_TLSv1_1 = getattr(_lib, \"SSL_OP_NO_TLSv1_1\", 0)\nOP_NO_TLSv1_2 = getattr(_lib, \"SSL_OP_NO_TLSv1_2\", 0)\n\ntry:\n MODE_RELEASE_BUFFERS = _lib.SSL_MODE_RELEASE_BUFFERS\nexcept AttributeError:\n pass\n\nOP_SINGLE_DH_USE = _lib.SSL_OP_SINGLE_DH_USE\nOP_EPHEMERAL_RSA = _lib.SSL_OP_EPHEMERAL_RSA\nOP_MICROSOFT_SESS_ID_BUG = _lib.SSL_OP_MICROSOFT_SESS_ID_BUG\nOP_NETSCAPE_CHALLENGE_BUG = _lib.SSL_OP_NETSCAPE_CHALLENGE_BUG\nOP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG = _lib.SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG\nOP_SSLREF2_REUSE_CERT_TYPE_BUG = _lib.SSL_OP_SSLREF2_REUSE_CERT_TYPE_BUG\nOP_MICROSOFT_BIG_SSLV3_BUFFER = _lib.SSL_OP_MICROSOFT_BIG_SSLV3_BUFFER\ntry:\n OP_MSIE_SSLV2_RSA_PADDING = _lib.SSL_OP_MSIE_SSLV2_RSA_PADDING\nexcept AttributeError:\n pass\nOP_SSLEAY_080_CLIENT_DH_BUG = _lib.SSL_OP_SSLEAY_080_CLIENT_DH_BUG\nOP_TLS_D5_BUG = _lib.SSL_OP_TLS_D5_BUG\nOP_TLS_BLOCK_PADDING_BUG = _lib.SSL_OP_TLS_BLOCK_PADDING_BUG\nOP_DONT_INSERT_EMPTY_FRAGMENTS = _lib.SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS\nOP_CIPHER_SERVER_PREFERENCE = _lib.SSL_OP_CIPHER_SERVER_PREFERENCE\nOP_TLS_ROLLBACK_BUG = _lib.SSL_OP_TLS_ROLLBACK_BUG\nOP_PKCS1_CHECK_1 = _lib.SSL_OP_PKCS1_CHECK_1\nOP_PKCS1_CHECK_2 = _lib.SSL_OP_PKCS1_CHECK_2\nOP_NETSCAPE_CA_DN_BUG = _lib.SSL_OP_NETSCAPE_CA_DN_BUG\nOP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG= _lib.SSL_OP_NETSCAPE_DEMO_CIPHER_CHANGE_BUG\ntry:\n OP_NO_COMPRESSION = _lib.SSL_OP_NO_COMPRESSION\nexcept AttributeError:\n pass\n\nOP_NO_QUERY_MTU = _lib.SSL_OP_NO_QUERY_MTU\nOP_COOKIE_EXCHANGE = _lib.SSL_OP_COOKIE_EXCHANGE\nOP_NO_TICKET = _lib.SSL_OP_NO_TICKET\n\nOP_ALL = _lib.SSL_OP_ALL\n\nVERIFY_PEER = _lib.SSL_VERIFY_PEER\nVERIFY_FAIL_IF_NO_PEER_CERT = _lib.SSL_VERIFY_FAIL_IF_NO_PEER_CERT\nVERIFY_CLIENT_ONCE = _lib.SSL_VERIFY_CLIENT_ONCE\nVERIFY_NONE = _lib.SSL_VERIFY_NONE\n\nSESS_CACHE_OFF = _lib.SSL_SESS_CACHE_OFF\nSESS_CACHE_CLIENT = _lib.SSL_SESS_CACHE_CLIENT\nSESS_CACHE_SERVER = _lib.SSL_SESS_CACHE_SERVER\nSESS_CACHE_BOTH = _lib.SSL_SESS_CACHE_BOTH\nSESS_CACHE_NO_AUTO_CLEAR = _lib.SSL_SESS_CACHE_NO_AUTO_CLEAR\nSESS_CACHE_NO_INTERNAL_LOOKUP = _lib.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP\nSESS_CACHE_NO_INTERNAL_STORE = _lib.SSL_SESS_CACHE_NO_INTERNAL_STORE\nSESS_CACHE_NO_INTERNAL = _lib.SSL_SESS_CACHE_NO_INTERNAL\n\nSSL_ST_CONNECT = _lib.SSL_ST_CONNECT\nSSL_ST_ACCEPT = _lib.SSL_ST_ACCEPT\nSSL_ST_MASK = 
_lib.SSL_ST_MASK\nSSL_ST_INIT = _lib.SSL_ST_INIT\nSSL_ST_BEFORE = _lib.SSL_ST_BEFORE\nSSL_ST_OK = _lib.SSL_ST_OK\nSSL_ST_RENEGOTIATE = _lib.SSL_ST_RENEGOTIATE\n\nSSL_CB_LOOP = _lib.SSL_CB_LOOP\nSSL_CB_EXIT = _lib.SSL_CB_EXIT\nSSL_CB_READ = _lib.SSL_CB_READ\nSSL_CB_WRITE = _lib.SSL_CB_WRITE\nSSL_CB_ALERT = _lib.SSL_CB_ALERT\nSSL_CB_READ_ALERT = _lib.SSL_CB_READ_ALERT\nSSL_CB_WRITE_ALERT = _lib.SSL_CB_WRITE_ALERT\nSSL_CB_ACCEPT_LOOP = _lib.SSL_CB_ACCEPT_LOOP\nSSL_CB_ACCEPT_EXIT = _lib.SSL_CB_ACCEPT_EXIT\nSSL_CB_CONNECT_LOOP = _lib.SSL_CB_CONNECT_LOOP\nSSL_CB_CONNECT_EXIT = _lib.SSL_CB_CONNECT_EXIT\nSSL_CB_HANDSHAKE_START = _lib.SSL_CB_HANDSHAKE_START\nSSL_CB_HANDSHAKE_DONE = _lib.SSL_CB_HANDSHAKE_DONE\n\n\nclass Error(Exception):\n \"\"\"\n An error occurred in an `OpenSSL.SSL` API.\n \"\"\"\n\n\n\n_raise_current_error = partial(_exception_from_error_queue, Error)\n\n\nclass WantReadError(Error):\n pass\n\n\n\nclass WantWriteError(Error):\n pass\n\n\n\nclass WantX509LookupError(Error):\n pass\n\n\n\nclass ZeroReturnError(Error):\n pass\n\n\n\nclass SysCallError(Error):\n pass\n\n\n\nclass _VerifyHelper(object):\n def __init__(self, connection, callback):\n self._problems = []\n\n @wraps(callback)\n def wrapper(ok, store_ctx):\n cert = X509.__new__(X509)\n cert._x509 = _lib.X509_STORE_CTX_get_current_cert(store_ctx)\n error_number = _lib.X509_STORE_CTX_get_error(store_ctx)\n error_depth = _lib.X509_STORE_CTX_get_error_depth(store_ctx)\n\n try:\n result = callback(connection, cert, error_number, error_depth, ok)\n except Exception as e:\n self._problems.append(e)\n return 0\n else:\n if result:\n _lib.X509_STORE_CTX_set_error(store_ctx, _lib.X509_V_OK)\n return 1\n else:\n return 0\n\n self.callback = _ffi.callback(\n \"int (*)(int, X509_STORE_CTX *)\", wrapper)\n\n\n def raise_if_problem(self):\n if self._problems:\n try:\n _raise_current_error()\n except Error:\n pass\n raise self._problems.pop(0)\n\n\n\ndef _asFileDescriptor(obj):\n fd = None\n if not isinstance(obj, integer_types):\n meth = getattr(obj, \"fileno\", None)\n if meth is not None:\n obj = meth()\n\n if isinstance(obj, integer_types):\n fd = obj\n\n if not isinstance(fd, integer_types):\n raise TypeError(\"argument must be an int, or have a fileno() method.\")\n elif fd < 0:\n raise ValueError(\n \"file descriptor cannot be a negative integer (%i)\" % (fd,))\n\n return fd\n\n\n\ndef SSLeay_version(type):\n \"\"\"\n Return a string describing the version of OpenSSL in use.\n\n :param type: One of the SSLEAY_ constants defined in this module.\n \"\"\"\n return _ffi.string(_lib.SSLeay_version(type))\n\n\n\nclass Session(object):\n pass\n\n\n\nclass Context(object):\n \"\"\"\n :py:obj:`OpenSSL.SSL.Context` instances define the parameters for setting up\n new SSL connections.\n \"\"\"\n _methods = {\n SSLv3_METHOD: \"SSLv3_method\",\n SSLv23_METHOD: \"SSLv23_method\",\n TLSv1_METHOD: \"TLSv1_method\",\n TLSv1_1_METHOD: \"TLSv1_1_method\",\n TLSv1_2_METHOD: \"TLSv1_2_method\",\n }\n _methods = dict(\n (identifier, getattr(_lib, name))\n for (identifier, name) in _methods.items()\n if getattr(_lib, name, None) is not None)\n\n\n def __init__(self, method):\n \"\"\"\n :param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, or\n TLSv1_METHOD.\n \"\"\"\n if not isinstance(method, integer_types):\n raise TypeError(\"method must be an integer\")\n\n try:\n method_func = self._methods[method]\n except KeyError:\n raise ValueError(\"No such protocol\")\n\n method_obj = method_func()\n if method_obj == _ffi.NULL:\n # TODO: This is 
untested.\n _raise_current_error()\n\n context = _lib.SSL_CTX_new(method_obj)\n if context == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n context = _ffi.gc(context, _lib.SSL_CTX_free)\n\n self._context = context\n self._passphrase_helper = None\n self._passphrase_callback = None\n self._passphrase_userdata = None\n self._verify_helper = None\n self._verify_callback = None\n self._info_callback = None\n self._tlsext_servername_callback = None\n self._app_data = None\n\n # SSL_CTX_set_app_data(self->ctx, self);\n # SSL_CTX_set_mode(self->ctx, SSL_MODE_ENABLE_PARTIAL_WRITE |\n # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |\n # SSL_MODE_AUTO_RETRY);\n self.set_mode(_lib.SSL_MODE_ENABLE_PARTIAL_WRITE)\n\n\n def load_verify_locations(self, cafile, capath=None):\n \"\"\"\n Let SSL know where we can find trusted certificates for the certificate\n chain\n\n :param cafile: In which file we can find the certificates\n :param capath: In which directory we can find the certificates\n :return: None\n \"\"\"\n if cafile is None:\n cafile = _ffi.NULL\n elif not isinstance(cafile, bytes):\n raise TypeError(\"cafile must be None or a byte string\")\n\n if capath is None:\n capath = _ffi.NULL\n elif not isinstance(capath, bytes):\n raise TypeError(\"capath must be None or a byte string\")\n\n load_result = _lib.SSL_CTX_load_verify_locations(self._context, cafile, capath)\n if not load_result:\n _raise_current_error()\n\n\n def _wrap_callback(self, callback):\n @wraps(callback)\n def wrapper(size, verify, userdata):\n return callback(size, verify, self._passphrase_userdata)\n return _PassphraseHelper(\n FILETYPE_PEM, wrapper, more_args=True, truncate=True)\n\n\n def set_passwd_cb(self, callback, userdata=None):\n \"\"\"\n Set the passphrase callback\n\n :param callback: The Python callback to use\n :param userdata: (optional) A Python object which will be given as\n argument to the callback\n :return: None\n \"\"\"\n if not callable(callback):\n raise TypeError(\"callback must be callable\")\n\n self._passphrase_helper = self._wrap_callback(callback)\n self._passphrase_callback = self._passphrase_helper.callback\n _lib.SSL_CTX_set_default_passwd_cb(\n self._context, self._passphrase_callback)\n self._passphrase_userdata = userdata\n\n\n def set_default_verify_paths(self):\n \"\"\"\n Use the platform-specific CA certificate locations\n\n :return: None\n \"\"\"\n set_result = _lib.SSL_CTX_set_default_verify_paths(self._context)\n if not set_result:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def use_certificate_chain_file(self, certfile):\n \"\"\"\n Load a certificate chain from a file\n\n :param certfile: The name of the certificate chain file\n :return: None\n \"\"\"\n if isinstance(certfile, _text_type):\n # Perhaps sys.getfilesystemencoding() could be better?\n certfile = certfile.encode(\"utf-8\")\n\n if not isinstance(certfile, bytes):\n raise TypeError(\"certfile must be bytes or unicode\")\n\n result = _lib.SSL_CTX_use_certificate_chain_file(self._context, certfile)\n if not result:\n _raise_current_error()\n\n\n def use_certificate_file(self, certfile, filetype=FILETYPE_PEM):\n \"\"\"\n Load a certificate from a file\n\n :param certfile: The name of the certificate file\n :param filetype: (optional) The encoding of the file, default is PEM\n :return: None\n \"\"\"\n if isinstance(certfile, _text_type):\n # Perhaps sys.getfilesystemencoding() could be better?\n certfile = certfile.encode(\"utf-8\")\n if not isinstance(certfile, bytes):\n raise TypeError(\"certfile must be 
bytes or unicode\")\n if not isinstance(filetype, integer_types):\n raise TypeError(\"filetype must be an integer\")\n\n use_result = _lib.SSL_CTX_use_certificate_file(self._context, certfile, filetype)\n if not use_result:\n _raise_current_error()\n\n\n def use_certificate(self, cert):\n \"\"\"\n Load a certificate from a X509 object\n\n :param cert: The X509 object\n :return: None\n \"\"\"\n if not isinstance(cert, X509):\n raise TypeError(\"cert must be an X509 instance\")\n\n use_result = _lib.SSL_CTX_use_certificate(self._context, cert._x509)\n if not use_result:\n _raise_current_error()\n\n\n def add_extra_chain_cert(self, certobj):\n \"\"\"\n Add certificate to chain\n\n :param certobj: The X509 certificate object to add to the chain\n :return: None\n \"\"\"\n if not isinstance(certobj, X509):\n raise TypeError(\"certobj must be an X509 instance\")\n\n copy = _lib.X509_dup(certobj._x509)\n add_result = _lib.SSL_CTX_add_extra_chain_cert(self._context, copy)\n if not add_result:\n # TODO: This is untested.\n _lib.X509_free(copy)\n _raise_current_error()\n\n\n def _raise_passphrase_exception(self):\n if self._passphrase_helper is None:\n _raise_current_error()\n exception = self._passphrase_helper.raise_if_problem(Error)\n if exception is not None:\n raise exception\n\n\n def use_privatekey_file(self, keyfile, filetype=_unspecified):\n \"\"\"\n Load a private key from a file\n\n :param keyfile: The name of the key file\n :param filetype: (optional) The encoding of the file, default is PEM\n :return: None\n \"\"\"\n if isinstance(keyfile, _text_type):\n # Perhaps sys.getfilesystemencoding() could be better?\n keyfile = keyfile.encode(\"utf-8\")\n\n if not isinstance(keyfile, bytes):\n raise TypeError(\"keyfile must be a byte string\")\n\n if filetype is _unspecified:\n filetype = FILETYPE_PEM\n elif not isinstance(filetype, integer_types):\n raise TypeError(\"filetype must be an integer\")\n\n use_result = _lib.SSL_CTX_use_PrivateKey_file(\n self._context, keyfile, filetype)\n if not use_result:\n self._raise_passphrase_exception()\n\n\n def use_privatekey(self, pkey):\n \"\"\"\n Load a private key from a PKey object\n\n :param pkey: The PKey object\n :return: None\n \"\"\"\n if not isinstance(pkey, PKey):\n raise TypeError(\"pkey must be a PKey instance\")\n\n use_result = _lib.SSL_CTX_use_PrivateKey(self._context, pkey._pkey)\n if not use_result:\n self._raise_passphrase_exception()\n\n\n def check_privatekey(self):\n \"\"\"\n Check that the private key and certificate match up\n\n :return: None (raises an exception if something's wrong)\n \"\"\"\n\n def load_client_ca(self, cafile):\n \"\"\"\n Load the trusted certificates that will be sent to the client (basically\n telling the client \"These are the guys I trust\"). Does not actually\n imply any of the certificates are trusted; that must be configured\n separately.\n\n :param cafile: The name of the certificates file\n :return: None\n \"\"\"\n\n def set_session_id(self, buf):\n \"\"\"\n Set the session identifier. 
This is needed if you want to do session\n resumption.\n\n :param buf: A Python object that can be safely converted to a string\n :returns: None\n \"\"\"\n\n def set_session_cache_mode(self, mode):\n \"\"\"\n Enable/disable session caching and specify the mode used.\n\n :param mode: One or more of the SESS_CACHE_* flags (combine using\n bitwise or)\n :returns: The previously set caching mode.\n \"\"\"\n if not isinstance(mode, integer_types):\n raise TypeError(\"mode must be an integer\")\n\n return _lib.SSL_CTX_set_session_cache_mode(self._context, mode)\n\n\n def get_session_cache_mode(self):\n \"\"\"\n :returns: The currently used cache mode.\n \"\"\"\n return _lib.SSL_CTX_get_session_cache_mode(self._context)\n\n\n def set_verify(self, mode, callback):\n \"\"\"\n Set the verify mode and verify callback\n\n :param mode: The verify mode, this is either VERIFY_NONE or\n VERIFY_PEER combined with possible other flags\n :param callback: The Python callback to use\n :return: None\n\n See SSL_CTX_set_verify(3SSL) for further details.\n \"\"\"\n if not isinstance(mode, integer_types):\n raise TypeError(\"mode must be an integer\")\n\n if not callable(callback):\n raise TypeError(\"callback must be callable\")\n\n self._verify_helper = _VerifyHelper(self, callback)\n self._verify_callback = self._verify_helper.callback\n _lib.SSL_CTX_set_verify(self._context, mode, self._verify_callback)\n\n\n def set_verify_depth(self, depth):\n \"\"\"\n Set the verify depth\n\n :param depth: An integer specifying the verify depth\n :return: None\n \"\"\"\n if not isinstance(depth, integer_types):\n raise TypeError(\"depth must be an integer\")\n\n _lib.SSL_CTX_set_verify_depth(self._context, depth)\n\n\n def get_verify_mode(self):\n \"\"\"\n Get the verify mode\n\n :return: The verify mode\n \"\"\"\n return _lib.SSL_CTX_get_verify_mode(self._context)\n\n\n def get_verify_depth(self):\n \"\"\"\n Get the verify depth\n\n :return: The verify depth\n \"\"\"\n return _lib.SSL_CTX_get_verify_depth(self._context)\n\n\n def load_tmp_dh(self, dhfile):\n \"\"\"\n Load parameters for Ephemeral Diffie-Hellman\n\n :param dhfile: The file to load EDH parameters from\n :return: None\n \"\"\"\n if not isinstance(dhfile, bytes):\n raise TypeError(\"dhfile must be a byte string\")\n\n bio = _lib.BIO_new_file(dhfile, b\"r\")\n if bio == _ffi.NULL:\n _raise_current_error()\n bio = _ffi.gc(bio, _lib.BIO_free)\n\n dh = _lib.PEM_read_bio_DHparams(bio, _ffi.NULL, _ffi.NULL, _ffi.NULL)\n dh = _ffi.gc(dh, _lib.DH_free)\n _lib.SSL_CTX_set_tmp_dh(self._context, dh)\n\n\n def set_cipher_list(self, cipher_list):\n \"\"\"\n Change the cipher list\n\n :param cipher_list: A cipher list, see ciphers(1)\n :return: None\n \"\"\"\n if isinstance(cipher_list, _text_type):\n cipher_list = cipher_list.encode(\"ascii\")\n\n if not isinstance(cipher_list, bytes):\n raise TypeError(\"cipher_list must be bytes or unicode\")\n\n result = _lib.SSL_CTX_set_cipher_list(self._context, cipher_list)\n if not result:\n _raise_current_error()\n\n\n def set_client_ca_list(self, certificate_authorities):\n \"\"\"\n Set the list of preferred client certificate signers for this server context.\n\n This list of certificate authorities will be sent to the client when the\n server requests a client certificate.\n\n :param certificate_authorities: a sequence of X509Names.\n :return: None\n \"\"\"\n name_stack = _lib.sk_X509_NAME_new_null()\n if name_stack == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n\n try:\n for ca_name in 
certificate_authorities:\n if not isinstance(ca_name, X509Name):\n raise TypeError(\n \"client CAs must be X509Name objects, not %s objects\" % (\n type(ca_name).__name__,))\n copy = _lib.X509_NAME_dup(ca_name._name)\n if copy == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n push_result = _lib.sk_X509_NAME_push(name_stack, copy)\n if not push_result:\n _lib.X509_NAME_free(copy)\n _raise_current_error()\n except:\n _lib.sk_X509_NAME_free(name_stack)\n raise\n\n _lib.SSL_CTX_set_client_CA_list(self._context, name_stack)\n\n\n def add_client_ca(self, certificate_authority):\n \"\"\"\n Add the CA certificate to the list of preferred signers for this context.\n\n The list of certificate authorities will be sent to the client when the\n server requests a client certificate.\n\n :param certificate_authority: certificate authority's X509 certificate.\n :return: None\n \"\"\"\n if not isinstance(certificate_authority, X509):\n raise TypeError(\"certificate_authority must be an X509 instance\")\n\n add_result = _lib.SSL_CTX_add_client_CA(\n self._context, certificate_authority._x509)\n if not add_result:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def set_timeout(self, timeout):\n \"\"\"\n Set session timeout\n\n :param timeout: The timeout in seconds\n :return: The previous session timeout\n \"\"\"\n if not isinstance(timeout, integer_types):\n raise TypeError(\"timeout must be an integer\")\n\n return _lib.SSL_CTX_set_timeout(self._context, timeout)\n\n\n def get_timeout(self):\n \"\"\"\n Get the session timeout\n\n :return: The session timeout\n \"\"\"\n return _lib.SSL_CTX_get_timeout(self._context)\n\n\n def set_info_callback(self, callback):\n \"\"\"\n Set the info callback\n\n :param callback: The Python callback to use\n :return: None\n \"\"\"\n @wraps(callback)\n def wrapper(ssl, where, return_code):\n callback(Connection._reverse_mapping[ssl], where, return_code)\n self._info_callback = _ffi.callback(\n \"void (*)(const SSL *, int, int)\", wrapper)\n _lib.SSL_CTX_set_info_callback(self._context, self._info_callback)\n\n\n def get_app_data(self):\n \"\"\"\n Get the application data (supplied via set_app_data())\n\n :return: The application data\n \"\"\"\n return self._app_data\n\n\n def set_app_data(self, data):\n \"\"\"\n Set the application data (will be returned from get_app_data())\n\n :param data: Any Python object\n :return: None\n \"\"\"\n self._app_data = data\n\n\n def get_cert_store(self):\n \"\"\"\n Get the certificate store for the context.\n\n :return: A X509Store object or None if it does not have one.\n \"\"\"\n store = _lib.SSL_CTX_get_cert_store(self._context)\n if store == _ffi.NULL:\n # TODO: This is untested.\n return None\n\n pystore = X509Store.__new__(X509Store)\n pystore._store = store\n return pystore\n\n\n def set_options(self, options):\n \"\"\"\n Add options. Options set before are not cleared!\n\n :param options: The options to add.\n :return: The new option bitmask.\n \"\"\"\n if not isinstance(options, integer_types):\n raise TypeError(\"options must be an integer\")\n\n return _lib.SSL_CTX_set_options(self._context, options)\n\n\n def set_mode(self, mode):\n \"\"\"\n Add modes via bitmask. 
Modes set before are not cleared!\n\n :param mode: The mode to add.\n :return: The new mode bitmask.\n \"\"\"\n if not isinstance(mode, integer_types):\n raise TypeError(\"mode must be an integer\")\n\n return _lib.SSL_CTX_set_mode(self._context, mode)\n\n\n def set_tlsext_servername_callback(self, callback):\n \"\"\"\n Specify a callback function to be called when clients specify a server name.\n\n :param callback: The callback function. It will be invoked with one\n argument, the Connection instance.\n \"\"\"\n @wraps(callback)\n def wrapper(ssl, alert, arg):\n callback(Connection._reverse_mapping[ssl])\n return 0\n\n self._tlsext_servername_callback = _ffi.callback(\n \"int (*)(const SSL *, int *, void *)\", wrapper)\n _lib.SSL_CTX_set_tlsext_servername_callback(\n self._context, self._tlsext_servername_callback)\n\nContextType = Context\n\n\n\nclass Connection(object):\n \"\"\"\n \"\"\"\n _reverse_mapping = WeakValueDictionary()\n\n def __init__(self, context, socket=None):\n \"\"\"\n Create a new Connection object, using the given OpenSSL.SSL.Context\n instance and socket.\n\n :param context: An SSL Context to use for this connection\n :param socket: The socket to use for transport layer\n \"\"\"\n if not isinstance(context, Context):\n raise TypeError(\"context must be a Context instance\")\n\n ssl = _lib.SSL_new(context._context)\n self._ssl = _ffi.gc(ssl, _lib.SSL_free)\n self._context = context\n\n self._reverse_mapping[self._ssl] = self\n\n if socket is None:\n self._socket = None\n # Don't set up any gc for these, SSL_free will take care of them.\n self._into_ssl = _lib.BIO_new(_lib.BIO_s_mem())\n self._from_ssl = _lib.BIO_new(_lib.BIO_s_mem())\n\n if self._into_ssl == _ffi.NULL or self._from_ssl == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n\n _lib.SSL_set_bio(self._ssl, self._into_ssl, self._from_ssl)\n else:\n self._into_ssl = None\n self._from_ssl = None\n self._socket = socket\n set_result = _lib.SSL_set_fd(self._ssl, _asFileDescriptor(self._socket))\n if not set_result:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def __getattr__(self, name):\n \"\"\"\n Look up attributes on the wrapped socket object if they are not found on\n the Connection object.\n \"\"\"\n return getattr(self._socket, name)\n\n\n def _raise_ssl_error(self, ssl, result):\n if self._context._verify_helper is not None:\n self._context._verify_helper.raise_if_problem()\n\n error = _lib.SSL_get_error(ssl, result)\n if error == _lib.SSL_ERROR_WANT_READ:\n raise WantReadError()\n elif error == _lib.SSL_ERROR_WANT_WRITE:\n raise WantWriteError()\n elif error == _lib.SSL_ERROR_ZERO_RETURN:\n raise ZeroReturnError()\n elif error == _lib.SSL_ERROR_WANT_X509_LOOKUP:\n # TODO: This is untested.\n raise WantX509LookupError()\n elif error == _lib.SSL_ERROR_SYSCALL:\n if _lib.ERR_peek_error() == 0:\n if result < 0:\n if platform == \"win32\":\n errno = _ffi.getwinerror()[0]\n else:\n errno = _ffi.errno\n raise SysCallError(errno, errorcode[errno])\n else:\n raise SysCallError(-1, \"Unexpected EOF\")\n else:\n # TODO: This is untested.\n _raise_current_error()\n elif error == _lib.SSL_ERROR_NONE:\n pass\n else:\n _raise_current_error()\n\n\n def get_context(self):\n \"\"\"\n Get session context\n \"\"\"\n return self._context\n\n\n def set_context(self, context):\n \"\"\"\n Switch this connection to a new session context\n\n :param context: A :py:class:`Context` instance giving the new session\n context to use.\n \"\"\"\n if not isinstance(context, Context):\n raise 
TypeError(\"context must be a Context instance\")\n\n _lib.SSL_set_SSL_CTX(self._ssl, context._context)\n self._context = context\n\n\n def get_servername(self):\n \"\"\"\n Retrieve the servername extension value if provided in the client hello\n message, or None if there wasn't one.\n\n :return: A byte string giving the server name or :py:data:`None`.\n \"\"\"\n name = _lib.SSL_get_servername(self._ssl, _lib.TLSEXT_NAMETYPE_host_name)\n if name == _ffi.NULL:\n return None\n\n return _ffi.string(name)\n\n\n def set_tlsext_host_name(self, name):\n \"\"\"\n Set the value of the servername extension to send in the client hello.\n\n :param name: A byte string giving the name.\n \"\"\"\n if not isinstance(name, bytes):\n raise TypeError(\"name must be a byte string\")\n elif b\"\\0\" in name:\n raise TypeError(\"name must not contain NUL byte\")\n\n # XXX I guess this can fail sometimes?\n _lib.SSL_set_tlsext_host_name(self._ssl, name)\n\n\n def pending(self):\n \"\"\"\n Get the number of bytes that can be safely read from the connection\n\n :return: The number of bytes available in the receive buffer.\n \"\"\"\n return _lib.SSL_pending(self._ssl)\n\n\n def send(self, buf, flags=0):\n \"\"\"\n Send data on the connection. NOTE: If you get one of the WantRead,\n WantWrite or WantX509Lookup exceptions on this, you have to call the\n method again with the SAME buffer.\n\n :param buf: The string to send\n :param flags: (optional) Included for compatibility with the socket\n API, the value is ignored\n :return: The number of bytes written\n \"\"\"\n if isinstance(buf, _memoryview):\n buf = buf.tobytes()\n if not isinstance(buf, bytes):\n raise TypeError(\"data must be a byte string\")\n\n result = _lib.SSL_write(self._ssl, buf, len(buf))\n self._raise_ssl_error(self._ssl, result)\n return result\n write = send\n\n\n def sendall(self, buf, flags=0):\n \"\"\"\n Send \"all\" data on the connection. This calls send() repeatedly until\n all data is sent. If an error occurs, it's impossible to tell how much\n data has been sent.\n\n :param buf: The string to send\n :param flags: (optional) Included for compatibility with the socket\n API, the value is ignored\n :return: The number of bytes written\n \"\"\"\n if isinstance(buf, _memoryview):\n buf = buf.tobytes()\n if not isinstance(buf, bytes):\n raise TypeError(\"buf must be a byte string\")\n\n left_to_send = len(buf)\n total_sent = 0\n data = _ffi.new(\"char[]\", buf)\n\n while left_to_send:\n result = _lib.SSL_write(self._ssl, data + total_sent, left_to_send)\n self._raise_ssl_error(self._ssl, result)\n total_sent += result\n left_to_send -= result\n\n\n def recv(self, bufsiz, flags=None):\n \"\"\"\n Receive data on the connection. NOTE: If you get one of the WantRead,\n WantWrite or WantX509Lookup exceptions on this, you have to call the\n method again with the SAME buffer.\n\n :param bufsiz: The maximum number of bytes to read\n :param flags: (optional) Included for compatibility with the socket\n API, the value is ignored\n :return: The string read from the Connection\n \"\"\"\n buf = _ffi.new(\"char[]\", bufsiz)\n result = _lib.SSL_read(self._ssl, buf, bufsiz)\n self._raise_ssl_error(self._ssl, result)\n return _ffi.buffer(buf, result)[:]\n read = recv\n\n\n def _handle_bio_errors(self, bio, result):\n if _lib.BIO_should_retry(bio):\n if _lib.BIO_should_read(bio):\n raise WantReadError()\n elif _lib.BIO_should_write(bio):\n # TODO: This is untested.\n raise WantWriteError()\n elif _lib.BIO_should_io_special(bio):\n # TODO: This is untested. 
I think io_special means the socket\n # BIO has a not-yet connected socket.\n raise ValueError(\"BIO_should_io_special\")\n else:\n # TODO: This is untested.\n raise ValueError(\"unknown bio failure\")\n else:\n # TODO: This is untested.\n _raise_current_error()\n\n\n def bio_read(self, bufsiz):\n \"\"\"\n When using non-socket connections this function reads the \"dirty\" data\n that would have traveled away on the network.\n\n :param bufsiz: The maximum number of bytes to read\n :return: The string read.\n \"\"\"\n if self._from_ssl is None:\n raise TypeError(\"Connection sock was not None\")\n\n if not isinstance(bufsiz, integer_types):\n raise TypeError(\"bufsiz must be an integer\")\n\n buf = _ffi.new(\"char[]\", bufsiz)\n result = _lib.BIO_read(self._from_ssl, buf, bufsiz)\n if result <= 0:\n self._handle_bio_errors(self._from_ssl, result)\n\n return _ffi.buffer(buf, result)[:]\n\n\n def bio_write(self, buf):\n \"\"\"\n When using non-socket connections this function sends \"dirty\" data that\n would have traveled in on the network.\n\n :param buf: The string to put into the memory BIO.\n :return: The number of bytes written\n \"\"\"\n if self._into_ssl is None:\n raise TypeError(\"Connection sock was not None\")\n\n if not isinstance(buf, bytes):\n raise TypeError(\"buf must be a byte string\")\n\n result = _lib.BIO_write(self._into_ssl, buf, len(buf))\n if result <= 0:\n self._handle_bio_errors(self._into_ssl, result)\n return result\n\n\n def renegotiate(self):\n \"\"\"\n Renegotiate the session\n\n :return: True if the renegotiation can be started, false otherwise\n \"\"\"\n\n def do_handshake(self):\n \"\"\"\n Perform an SSL handshake (usually called after renegotiate() or one of\n set_*_state()). This can raise the same exceptions as send and recv.\n\n :return: None.\n \"\"\"\n result = _lib.SSL_do_handshake(self._ssl)\n self._raise_ssl_error(self._ssl, result)\n\n\n def renegotiate_pending(self):\n \"\"\"\n Check if there's a renegotiation in progress, it will return false once\n a renegotiation is finished.\n\n :return: Whether there's a renegotiation in progress\n \"\"\"\n\n def total_renegotiations(self):\n \"\"\"\n Find out the total number of renegotiations.\n\n :return: The number of renegotiations.\n \"\"\"\n return _lib.SSL_total_renegotiations(self._ssl)\n\n\n def connect(self, addr):\n \"\"\"\n Connect to remote host and set up client-side SSL\n\n :param addr: A remote address\n :return: What the socket's connect method returns\n \"\"\"\n _lib.SSL_set_connect_state(self._ssl)\n return self._socket.connect(addr)\n\n\n def connect_ex(self, addr):\n \"\"\"\n Connect to remote host and set up client-side SSL. 
Note that if the socket's\n connect_ex method doesn't return 0, SSL won't be initialized.\n\n :param addr: A remove address\n :return: What the socket's connect_ex method returns\n \"\"\"\n connect_ex = self._socket.connect_ex\n self.set_connect_state()\n return connect_ex(addr)\n\n\n def accept(self):\n \"\"\"\n Accept incoming connection and set up SSL on it\n\n :return: A (conn,addr) pair where conn is a Connection and addr is an\n address\n \"\"\"\n client, addr = self._socket.accept()\n conn = Connection(self._context, client)\n conn.set_accept_state()\n return (conn, addr)\n\n\n def bio_shutdown(self):\n \"\"\"\n When using non-socket connections this function signals end of\n data on the input for this connection.\n\n :return: None\n \"\"\"\n if self._from_ssl is None:\n raise TypeError(\"Connection sock was not None\")\n\n _lib.BIO_set_mem_eof_return(self._into_ssl, 0)\n\n\n def shutdown(self):\n \"\"\"\n Send closure alert\n\n :return: True if the shutdown completed successfully (i.e. both sides\n have sent closure alerts), false otherwise (i.e. you have to\n wait for a ZeroReturnError on a recv() method call\n \"\"\"\n result = _lib.SSL_shutdown(self._ssl)\n if result < 0:\n # TODO: This is untested.\n _raise_current_error()\n elif result > 0:\n return True\n else:\n return False\n\n\n def get_cipher_list(self):\n \"\"\"\n Get the session cipher list\n\n :return: A list of cipher strings\n \"\"\"\n ciphers = []\n for i in count():\n result = _lib.SSL_get_cipher_list(self._ssl, i)\n if result == _ffi.NULL:\n break\n ciphers.append(_native(_ffi.string(result)))\n return ciphers\n\n\n def get_client_ca_list(self):\n \"\"\"\n Get CAs whose certificates are suggested for client authentication.\n\n :return: If this is a server connection, a list of X509Names representing\n the acceptable CAs as set by :py:meth:`OpenSSL.SSL.Context.set_client_ca_list` or\n :py:meth:`OpenSSL.SSL.Context.add_client_ca`. 
If this is a client connection,\n the list of such X509Names sent by the server, or an empty list if that\n has not yet happened.\n \"\"\"\n ca_names = _lib.SSL_get_client_CA_list(self._ssl)\n if ca_names == _ffi.NULL:\n # TODO: This is untested.\n return []\n\n result = []\n for i in range(_lib.sk_X509_NAME_num(ca_names)):\n name = _lib.sk_X509_NAME_value(ca_names, i)\n copy = _lib.X509_NAME_dup(name)\n if copy == _ffi.NULL:\n # TODO: This is untested.\n _raise_current_error()\n\n pyname = X509Name.__new__(X509Name)\n pyname._name = _ffi.gc(copy, _lib.X509_NAME_free)\n result.append(pyname)\n return result\n\n\n def makefile(self):\n \"\"\"\n The makefile() method is not implemented, since there is no dup semantics\n for SSL connections\n\n :raise NotImplementedError\n \"\"\"\n raise NotImplementedError(\"Cannot make file object of OpenSSL.SSL.Connection\")\n\n\n def get_app_data(self):\n \"\"\"\n Get application data\n\n :return: The application data\n \"\"\"\n return self._app_data\n\n\n def set_app_data(self, data):\n \"\"\"\n Set application data\n\n :param data - The application data\n :return: None\n \"\"\"\n self._app_data = data\n\n\n def get_shutdown(self):\n \"\"\"\n Get shutdown state\n\n :return: The shutdown state, a bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.\n \"\"\"\n return _lib.SSL_get_shutdown(self._ssl)\n\n\n def set_shutdown(self, state):\n \"\"\"\n Set shutdown state\n\n :param state - bitvector of SENT_SHUTDOWN, RECEIVED_SHUTDOWN.\n :return: None\n \"\"\"\n if not isinstance(state, integer_types):\n raise TypeError(\"state must be an integer\")\n\n _lib.SSL_set_shutdown(self._ssl, state)\n\n\n def state_string(self):\n \"\"\"\n Get a verbose state description\n\n :return: A string representing the state\n \"\"\"\n\n def server_random(self):\n \"\"\"\n Get a copy of the server hello nonce.\n\n :return: A string representing the state\n \"\"\"\n if self._ssl.session == _ffi.NULL:\n return None\n return _ffi.buffer(\n self._ssl.s3.server_random,\n _lib.SSL3_RANDOM_SIZE)[:]\n\n\n def client_random(self):\n \"\"\"\n Get a copy of the client hello nonce.\n\n :return: A string representing the state\n \"\"\"\n if self._ssl.session == _ffi.NULL:\n return None\n return _ffi.buffer(\n self._ssl.s3.client_random,\n _lib.SSL3_RANDOM_SIZE)[:]\n\n\n def master_key(self):\n \"\"\"\n Get a copy of the master key.\n\n :return: A string representing the state\n \"\"\"\n if self._ssl.session == _ffi.NULL:\n return None\n return _ffi.buffer(\n self._ssl.session.master_key,\n self._ssl.session.master_key_length)[:]\n\n\n def sock_shutdown(self, *args, **kwargs):\n \"\"\"\n See shutdown(2)\n\n :return: What the socket's shutdown() method returns\n \"\"\"\n return self._socket.shutdown(*args, **kwargs)\n\n\n def get_peer_certificate(self):\n \"\"\"\n Retrieve the other side's certificate (if any)\n\n :return: The peer's certificate\n \"\"\"\n cert = _lib.SSL_get_peer_certificate(self._ssl)\n if cert != _ffi.NULL:\n pycert = X509.__new__(X509)\n pycert._x509 = _ffi.gc(cert, _lib.X509_free)\n return pycert\n return None\n\n\n def get_peer_cert_chain(self):\n \"\"\"\n Retrieve the other side's certificate (if any)\n\n :return: A list of X509 instances giving the peer's certificate chain,\n or None if it does not have one.\n \"\"\"\n cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl)\n if cert_stack == _ffi.NULL:\n return None\n\n result = []\n for i in range(_lib.sk_X509_num(cert_stack)):\n # TODO could incref instead of dup here\n cert = _lib.X509_dup(_lib.sk_X509_value(cert_stack, 
i))\n pycert = X509.__new__(X509)\n pycert._x509 = _ffi.gc(cert, _lib.X509_free)\n result.append(pycert)\n return result\n\n\n def want_read(self):\n \"\"\"\n Checks if more data has to be read from the transport layer to complete an\n operation.\n\n :return: True iff more data has to be read\n \"\"\"\n return _lib.SSL_want_read(self._ssl)\n\n\n def want_write(self):\n \"\"\"\n Checks if there is data to write to the transport layer to complete an\n operation.\n\n :return: True iff there is data to write\n \"\"\"\n return _lib.SSL_want_write(self._ssl)\n\n\n def set_accept_state(self):\n \"\"\"\n Set the connection to work in server mode. The handshake will be handled\n automatically by read/write.\n\n :return: None\n \"\"\"\n _lib.SSL_set_accept_state(self._ssl)\n\n\n def set_connect_state(self):\n \"\"\"\n Set the connection to work in client mode. The handshake will be handled\n automatically by read/write.\n\n :return: None\n \"\"\"\n _lib.SSL_set_connect_state(self._ssl)\n\n\n def get_session(self):\n \"\"\"\n Returns the Session currently used.\n\n @return: An instance of :py:class:`OpenSSL.SSL.Session` or :py:obj:`None` if\n no session exists.\n \"\"\"\n session = _lib.SSL_get1_session(self._ssl)\n if session == _ffi.NULL:\n return None\n\n pysession = Session.__new__(Session)\n pysession._session = _ffi.gc(session, _lib.SSL_SESSION_free)\n return pysession\n\n\n def set_session(self, session):\n \"\"\"\n Set the session to be used when the TLS/SSL connection is established.\n\n :param session: A Session instance representing the session to use.\n :returns: None\n \"\"\"\n if not isinstance(session, Session):\n raise TypeError(\"session must be a Session instance\")\n\n result = _lib.SSL_set_session(self._ssl, session._session)\n if not result:\n _raise_current_error()\n\nConnectionType = Connection\n\n# This is similar to the initialization calls at the end of OpenSSL/crypto.py\n# but is exercised mostly by the Context initializer.\n_lib.SSL_library_init()\n","repo_name":"sorig/moodle-scraper","sub_path":"venv/lib/python2.7/site-packages/OpenSSL/SSL.py","file_name":"SSL.py","file_ext":"py","file_size_in_byte":43264,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"77"}
+{"seq_id":"19536288437","text":"from tqdm import tqdm\nfrom subprocess import Popen\nimport os\nimport sys\nimport logging\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"createDataset\")\nlogger.setLevel(logging.INFO)\n\n\ndef createDataset(datasetPath):\n numberOfMatchesPerConfiguration = 10\n contestantAIs = [\"MctsAi\", \"JerryMizunoAI\", \"LoadTorchWeightAI\", \"RandomAI\"]\n availableCharacters = [\"ZEN\", \"GARNET\", \"LUD\"]\n positions = [\"player 1\", \"player 2\"]\n\n datasetPath += 'dataset/'\n if not os.path.isdir(datasetPath):\n os.mkdir(datasetPath)\n createTemporaryFileWithDatasetLocation(datasetPath)\n\n try:\n for contestant in tqdm(contestantAIs):\n for position in positions:\n for character1 in availableCharacters:\n for character2 in availableCharacters:\n prepareTemporaryFile(datasetPath, contestant, character1, character2, position)\n playMatch(contestant, character1, character2, position, numberOfGames=numberOfMatchesPerConfiguration)\n except KeyboardInterrupt:\n os.remove('.datasetPath.yaml')\n logger.info(\"DATASET CREATION STOPPED BY USER\")\n\n\ndef createTemporaryFileWithDatasetLocation(datasetPath):\n with open('.datasetPath.yaml', 'w') as f:\n f.write('datasetPath: ' + datasetPath)\n\n\ndef prepareTemporaryFile(datasetPath, contestant, character1, character2, playerPosition):\n content = 'contestantAI: {}\\ncharacter1: {}\\ncharacter2: {}\\n'.format(contestant, character1, character2)\n with open(datasetPath + \".temp_match_info.yaml\", \"w+\") as f:\n f.write(content)\n f.truncate()\n\n\ndef playMatch(contestant, character1, character2, position, numberOfGames):\n classpath = \"bin:lib/logback/:lib/slf4j-1.7.25/slf4j-api-1.7.25.jar:lib/slf4j-1.7.25/slf4j-simple-1.7.25.jar:lib/snakeyaml-1.17.jar:lib/natives/linux/lwjgl-glfw-natives-linux.jar:data/aiData/:lib/natives/linux/lwjgl-natives-linux.jar:lib/natives/linux/lwjgl-openal-natives-linux.jar:lib/natives/linux/lwjgl-opengl-natives-linux.jar:FightingICE.jar:lib/lwjgl/lwjgl_util.jar:lib/lwjgl/lwjgl-glfw.jar:lib/lwjgl/lwjgl-openal.jar:lib/lwjgl/lwjgl-opengl.jar:lib/lwjgl/lwjgl.jar:lib/javax.json-1.0.4.jar:lib/py4j0.10.4.jar\"\n mainClass = \"Main\"\n\n numberOfMatches = '-n {}'.format(numberOfGames)\n matchContestants = '--a1 DatasetCreator --a2 {}'.format(contestant) if position == \"player 1\" else '--a1 {} --a2 DatasetCreator'.format(contestant)\n playableCharacters = '--c1 {} --c2 {}'.format(character1, character2)\n otherFlags = '--grey-bg --inverted-player 1 --mute'\n flags = (numberOfMatches + ' ' + matchContestants + ' ' + playableCharacters + ' ' + otherFlags).split(' ')\n\n p = Popen([\"java\", \"-classpath\", classpath, mainClass] + flags)\n p.wait()\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('\\n')\n logger.error(\"The script takes exactly 1 argument, which represents the path where the dataset will be created\")\n print('\\n')\n else:\n datasetPath = str(sys.argv[1])\n createDataset(datasetPath)\n","repo_name":"Danielhp95/CIG-FightingICE-2018-entry","sub_path":"FTG4.30/createDataset.py","file_name":"createDataset.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"41506508358","text":"\n\ndef frac_knapsack(arr : list, weight):\n arr.sort(key=lambda x : x[1]/x[0], reverse=True)\n res = 0.0\n for item in arr:\n if item[0] <= weight:\n res += item[1]\n weight -= item[0]\n else:\n res += item[1] * weight/item[0]\n break\n\n return res\n\n\nprint(frac_knapsack([(10,60), (40,40), (20,100), (30,120)], 50))","repo_name":"anmol211/TrailStuff","sub_path":"Greedy/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"25569331664","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of lists of integers\n result = []\n def zigzagLevelOrder(self, root):\n if (root == None) :\n return []\n if (root.left == None and root.right == None):\n return [[root.val]]\n self.result = []\n self.result.append([root.val])\n self.helper([root])\n return self.result\n \n def helper(self, treeArray):\n tempResult = []\n tempTreeArray = []\n for i in treeArray:\n if(i.left != None):\n tempResult.append(i.left.val)\n tempTreeArray.append(i.left)\n if(i.right != None):\n tempResult.append(i.right.val)\n tempTreeArray.append(i.right)\n \n if (tempResult == [] or tempTreeArray == []):\n return\n if(len(self.result) % 2 == 1):\n tempResult.reverse()\n self.result.append(tempResult)\n self.helper (tempTreeArray)\n \n","repo_name":"jinmingmu/codeingInterview","sub_path":"Binary_Tree_Zigzag_Level_Order_Traversal.py","file_name":"Binary_Tree_Zigzag_Level_Order_Traversal.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"15161296649","text":"from flask import Flask,render_template,request\nimport pickle\napp=Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route(\"/form\")\ndef form():\n return render_template('form.html')\n\n@app.route('/result',methods=['POST'])\ndef insurance_cost():\n age=int(request.form['age'])\n bmi=float(request.form['bmi'])\n children=int(request.form['no_of_children'])\n sex=request.form['gender']\n smoker=request.form['smoker']\n region=request.form['region']\n r1=0\n r2=0\n r3=0\n if(region==1):\n r1=1\n r2=0\n r3=0\n elif(region==2):\n r1=0\n r2=0\n r3=0\n elif(region==3):\n r1=0\n r2=1\n r3=0\n elif(region==4):\n r1=0\n r2=0\n r3=1\n \n print(r1,r2,r3)\n filename = 'model.sav'\n loaded_model = pickle.load(open(filename, 'rb'))\n result = round(float(loaded_model.predict([[age,bmi,children,sex,smoker,r1,r2,r3]])),2)\n return render_template('result.html',result=result)\n\nif __name__=='__main__':\n app.run(debug=True)","repo_name":"anushavc/Insurance-Cost-Predictor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39430726816","text":"import xbmc\nimport xbmcgui\nimport xbmcvfs\nimport datetime\nimport resources.lib.utils as utils\nfrom json import loads\nfrom string import Formatter\nfrom collections import defaultdict\nfrom resources.lib.plugin import Plugin\nfrom resources.lib.kodilibrary import KodiLibrary\nfrom resources.lib.traktapi import TraktAPI\nfrom resources.lib.listitem import ListItem\ntry:\n from urllib.parse import quote_plus # Py3\nexcept ImportError:\n from urllib import quote_plus # Py2\n\n\ndef string_format_map(fmt, d):\n try:\n str.format_map\n except AttributeError:\n parts = Formatter().parse(fmt)\n return fmt.format(**{part[1]: d[part[1]] for part in parts})\n else:\n return fmt.format(**d)\n\n\nclass Player(Plugin):\n def __init__(self):\n super(Player, self).__init__()\n self.traktapi = TraktAPI()\n self.search_movie, self.search_episode, self.play_movie, self.play_episode = [], [], [], []\n self.item = defaultdict(lambda: '+')\n self.itemlist, self.actions, self.players, self.identifierlist = [], [], {}, []\n self.is_local = None\n self.dp_movies = self.addon.getSettingString('default_player_movies')\n self.dp_episodes = self.addon.getSettingString('default_player_episodes')\n self.dp_movies_id = None\n self.dp_episodes_id = None\n self.fallbacks = {}\n\n def setup_players(self, tmdbtype=None, details=False, clearsetting=False, assertplayers=True):\n self.build_players(tmdbtype)\n if details:\n self.build_details()\n self.build_selectbox(clearsetting, assertplayers)\n\n def get_fallback(self, dp_file, dp_action):\n fallback = self.players.get(dp_file, {}).get('fallback', {}).get(dp_action)\n if not fallback: # No fallback so prompt dialog\n return xbmcgui.Dialog().select(self.addon.getLocalizedString(32042), self.itemlist)\n if fallback in self.identifierlist: # Found a fallback in list so play that\n return self.identifierlist.index(fallback)\n fb_file, fb_action = fallback.split()\n return self.get_fallback(fb_file, fb_action) # Fallback not in list so let's check fallback's fallback\n\n def get_playerindex(self, force_dialog=False):\n if force_dialog or (self.itemtype == 'movie' and not self.dp_movies) or (self.itemtype == 'episode' and not self.dp_episodes):\n idx = xbmcgui.Dialog().select(self.addon.getLocalizedString(32042), self.itemlist) # Ask user to select player\n if self.itemtype == 'movie':\n self.dp_movies = self.itemlist[idx].getLabel()\n self.dp_movies_id = self.identifierlist[idx]\n elif self.itemtype == 'episode':\n self.dp_episodes = self.itemlist[idx].getLabel()\n self.dp_episodes_id = self.identifierlist[idx]\n return idx\n\n for i in range(0, len(self.itemlist)):\n label = self.itemlist[i].getLabel()\n if (\n (label == self.dp_movies and self.itemtype == 'movie') or\n (label == self.dp_episodes and self.itemtype == 'episode') or\n (label == u'{0} {1}'.format(self.addon.getLocalizedString(32061), 'Kodi'))):\n return i # Play local or with default player if found\n\n # Check for fallbacks\n if self.itemtype == 'movie' and self.dp_movies_id:\n dp_file, dp_action = self.dp_movies_id.split()\n return self.get_fallback(dp_file, dp_action)\n if self.itemtype == 'episode' and self.dp_episodes_id:\n dp_file, dp_action = self.dp_episodes_id.split()\n return self.get_fallback(dp_file, dp_action)\n\n return -1\n\n def play_external(self, force_dialog=False, playerindex=-1):\n if playerindex > -1: # Previous iteration didn't find an item to play so remove it and retry\n xbmcgui.Dialog().notification(self.itemlist[playerindex].getLabel(), 
self.addon.getLocalizedString(32040))\n del self.actions[playerindex] # Item not found so remove the player's action list\n del self.itemlist[playerindex] # Item not found so remove the player's select dialog entry\n del self.identifierlist[playerindex] # Item not found so remove the player's index\n\n playerindex = self.get_playerindex(force_dialog=force_dialog)\n\n # User cancelled dialog\n if not playerindex > -1:\n return False\n\n player = self.actions[playerindex]\n if not player or not player[1]:\n return False\n\n # External player has list of actions so let's iterate through them to find our item\n resolve_url = False\n if isinstance(player[1], list):\n actionlist = player[1]\n player = (False, actionlist[0])\n for d in actionlist[1:]:\n if player[0]:\n break # Playable item was found in last action so let's break and play it\n folder = KodiLibrary().get_directory(string_format_map(player[1], self.item)) # Get the next folder from the plugin\n\n if d.get('dialog'): # Special option to show dialog of items to select from\n d_items = []\n for f in folder: # Create our list of items\n if not f.get('label') or f.get('label') == 'None':\n continue\n lb_list = []\n label_a = f.get('label')\n if f.get('year') and f.get('year') != 1601:\n label_a = u'{} ({})'.format(label_a, f.get('year'))\n if utils.try_parse_int(f.get('season', 0)) > 0 and utils.try_parse_int(f.get('episode', 0)) > 0:\n label_a = u'{}x{}. {}'.format(f.get('season'), f.get('episode'), label_a)\n if f.get('streamdetails'):\n sdv_list = f.get('streamdetails', {}).get('video', [{}]) or [{}]\n sda_list = f.get('streamdetails', {}).get('audio', [{}]) or [{}]\n sdv, sda = sdv_list[0], sda_list[0]\n if sdv.get('width') or sdv.get('height'):\n lb_list.append(u'{}x{}'.format(sdv.get('width'), sdv.get('height')))\n if sdv.get('codec'):\n lb_list.append(u'{}'.format(sdv.get('codec', '').upper()))\n if sda.get('codec'):\n lb_list.append(u'{}'.format(sda.get('codec', '').upper()))\n if sda.get('channels'):\n lb_list.append(u'{} CH'.format(sda.get('channels', '')))\n for i in sda_list:\n if i.get('language'):\n lb_list.append(u'{}'.format(i.get('language', '').upper()))\n if sdv.get('duration'):\n lb_list.append(u'{} mins'.format(utils.try_parse_int(sdv.get('duration', 0)) // 60))\n if f.get('size'):\n lb_list.append(u'{}'.format(utils.normalise_filesize(f.get('size', 0))))\n label_b = ' | '.join(lb_list) if lb_list else ''\n d_items.append(ListItem(label=label_a, label2=label_b, icon=f.get('thumbnail')).set_listitem())\n if d_items:\n idx = 0\n if d.get('dialog', '').lower() != 'auto' or len(d_items) != 1:\n idx = xbmcgui.Dialog().select('Select Item', d_items, useDetails=True)\n if idx == -1: # User exited the dialog so return and do nothing\n return\n resolve_url = True if folder[idx].get('filetype') == 'file' else False # Set true for files so we can play\n player = (resolve_url, folder[idx].get('file')) # Set the folder path to open/play\n break # Move onto next action\n else: # Ask user to select a different player if no items in dialog\n return self.play_external(force_dialog=force_dialog, playerindex=playerindex)\n\n x = 0\n for f in folder: # Iterate through plugin folder looking for a matching item\n x += 1 # Keep an index for position matching\n for k, v in d.items(): # Iterate through our key (infolabel) / value (infolabel must match) pairs of our action\n if k == 'position': # We're looking for an item position not an infolabel\n if utils.try_parse_int(string_format_map(v, self.item)) != x: # Format our position value\n 
break # Not the item position we want so let's go to next item in folder\n elif not f.get(k) or string_format_map(v, self.item) not in u'{}'.format(f.get(k, '')): # Format our value and check if it matches the infolabel key\n break # Item's key value doesn't match value we are looking for so let's got to next item in folder\n else: # Item matched our criteria so let's open it up\n resolve_url = True if f.get('filetype') == 'file' else False # Set true for files so we can play\n player = (resolve_url, f.get('file')) # Get ListItem.FolderPath for item\n break # Move onto next action (either open next folder or play file)\n else:\n return self.play_external(force_dialog=force_dialog, playerindex=playerindex) # Ask user to select a different player\n\n # Play/Search found item\n if player and player[1]:\n action = string_format_map(player[1], self.item)\n if player[0] and action.endswith('.strm'): # Action is play and is a strm so PlayMedia\n xbmc.executebuiltin(utils.try_decode_string(u'PlayMedia({0})'.format(action)))\n elif player[0]: # Action is play and not a strm so play with player\n xbmc.Player().play(action, ListItem(library='video', **self.details).set_listitem())\n else:\n action = u'Container.Update({0})'.format(action) if xbmc.getCondVisibility(\"Window.IsMedia\") else u'ActivateWindow(videos,{0},return)'.format(action)\n xbmc.executebuiltin(utils.try_decode_string(action))\n return action\n\n def play(self, itemtype, tmdb_id, season=None, episode=None, force_dialog=False):\n \"\"\" Entry point for player method \"\"\"\n if not tmdb_id or not itemtype:\n return\n\n # Get the details for the item\n self.itemtype, self.tmdb_id, self.season, self.episode = itemtype, tmdb_id, season, episode\n self.tmdbtype = 'tv' if self.itemtype in ['episode', 'tv'] else 'movie'\n self.details = self.tmdb.get_detailed_item(self.tmdbtype, tmdb_id, season=season, episode=episode)\n self.item['imdb_id'] = self.details.get('infolabels', {}).get('imdbnumber')\n self.item['originaltitle'] = self.details.get('infolabels', {}).get('originaltitle')\n self.item['title'] = self.details.get('infolabels', {}).get('tvshowtitle') or self.details.get('infolabels', {}).get('title')\n self.item['year'] = self.details.get('infolabels', {}).get('year')\n\n # Check if we have a local file\n # TODO: Add option to auto play local\n if self.details and self.itemtype == 'movie':\n self.is_local = self.localmovie()\n if self.details and self.itemtype == 'episode':\n self.is_local = self.localepisode()\n\n self.setup_players(details=True)\n\n if not self.itemlist:\n return False\n\n return self.play_external(force_dialog=force_dialog)\n\n def build_details(self):\n self.item['id'] = self.tmdb_id\n self.item['tmdb'] = self.tmdb_id\n self.item['imdb'] = self.details.get('infolabels', {}).get('imdbnumber')\n self.item['name'] = u'{0} ({1})'.format(self.item.get('title'), self.item.get('year'))\n self.item['firstaired'] = self.details.get('infolabels', {}).get('premiered')\n self.item['premiered'] = self.details.get('infolabels', {}).get('premiered')\n self.item['released'] = self.details.get('infolabels', {}).get('premiered')\n self.item['showname'] = self.item.get('title')\n self.item['clearname'] = self.item.get('title')\n self.item['tvshowtitle'] = self.item.get('title')\n self.item['title'] = self.item.get('title')\n self.item['thumbnail'] = self.details.get('thumb')\n self.item['poster'] = self.details.get('poster')\n self.item['fanart'] = self.details.get('fanart')\n self.item['now'] = 
datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')\n\n if self.traktapi:\n slug_type = utils.type_convert(self.tmdbtype, 'trakt')\n trakt_details = self.traktapi.get_details(slug_type, self.traktapi.get_traktslug(slug_type, 'tmdb', self.tmdb_id))\n self.item['trakt'] = trakt_details.get('ids', {}).get('trakt')\n self.item['imdb'] = trakt_details.get('ids', {}).get('imdb')\n self.item['tvdb'] = trakt_details.get('ids', {}).get('tvdb')\n self.item['slug'] = trakt_details.get('ids', {}).get('slug')\n\n if self.itemtype == 'episode': # Do some special episode stuff\n self.item['id'] = self.item.get('tvdb')\n self.item['title'] = self.details.get('infolabels', {}).get('title') # Set Episode Title\n self.item['name'] = u'{0} S{1:02d}E{2:02d}'.format(self.item.get('showname'), int(utils.try_parse_int(self.season)), int(utils.try_parse_int(self.episode)))\n self.item['season'] = self.season\n self.item['episode'] = self.episode\n\n if self.traktapi and self.itemtype == 'episode':\n trakt_details = self.traktapi.get_details(slug_type, self.item.get('slug'), season=self.season, episode=self.episode)\n self.item['epid'] = trakt_details.get('ids', {}).get('tvdb')\n self.item['epimdb'] = trakt_details.get('ids', {}).get('imdb')\n self.item['eptmdb'] = trakt_details.get('ids', {}).get('tmdb')\n self.item['eptrakt'] = trakt_details.get('ids', {}).get('trakt')\n\n for k, v in self.item.copy().items():\n v = u'{0}'.format(v)\n self.item[k] = v.replace(',', '')\n self.item[k + '_+'] = v.replace(' ', '+')\n self.item[k + '_-'] = v.replace(' ', '-')\n self.item[k + '_escaped'] = v.replace(' ', '%2520')\n self.item[k + '_escaped+'] = v.replace(' ', '%252B')\n self.item[k + '_url'] = quote_plus(utils.try_encode_string(v))\n\n def build_players(self, tmdbtype=None):\n basedirs = ['special://profile/addon_data/plugin.video.themoviedb.helper/players/']\n if self.addon.getSettingBool('bundled_players'):\n basedirs.append('special://home/addons/plugin.video.themoviedb.helper/resources/players/')\n for basedir in basedirs:\n files = [x for x in xbmcvfs.listdir(basedir)[1] if x.endswith('.json')]\n for file in files:\n vfs_file = xbmcvfs.File(basedir + file)\n try:\n content = vfs_file.read()\n meta = loads(content) or {}\n finally:\n vfs_file.close()\n\n self.players[file] = meta\n if not meta.get('plugin') or not xbmc.getCondVisibility(u'System.HasAddon({0})'.format(meta.get('plugin'))):\n continue # Don't have plugin so skip\n\n tmdbtype = tmdbtype or self.tmdbtype\n priority = utils.try_parse_int(meta.get('priority')) or 1000\n if tmdbtype == 'movie' and meta.get('search_movie'):\n self.search_movie.append((file, priority))\n if tmdbtype == 'movie' and meta.get('play_movie'):\n self.play_movie.append((file, priority))\n if tmdbtype == 'tv' and meta.get('search_episode'):\n self.search_episode.append((file, priority))\n if tmdbtype == 'tv' and meta.get('play_episode'):\n self.play_episode.append((file, priority))\n\n def build_playeraction(self, playerfile, action, assertplayers=True):\n player = self.players.get(playerfile, {})\n isplay = True if action.startswith('play_') else False\n prefix = self.addon.getLocalizedString(32061) if action.startswith('play_') else xbmc.getLocalizedString(137)\n label = u'{0} {1}'.format(prefix, player.get('name', ''))\n\n # Check if matches default player and set default player id\n if label == self.dp_movies:\n self.dp_movies_id = '{} {}'.format(playerfile, action)\n if label == self.dp_episodes:\n self.dp_episodes_id = '{} {}'.format(playerfile, action)\n\n # Check that 
asserted values exist\n if assertplayers:\n for i in player.get('assert', {}).get(action, []):\n if i.startswith('!'):\n if self.item.get(i[1:]) and self.item.get(i[1:]) != 'None':\n return # inverted assert - has value but we don't want it so don't build that player\n else:\n if not self.item.get(i) or self.item.get(i) == 'None':\n return # missing / empty asserted value so don't build that player\n\n # Add player action to list for dialog\n self.append_playeraction(\n label=label, action=player.get(action, ''), isplay=isplay,\n identifier='{} {}'.format(playerfile, action))\n\n def append_playeraction(self, label, action, isplay=True, identifier=''):\n self.itemlist.append(xbmcgui.ListItem(label))\n self.actions.append((isplay, action))\n self.identifierlist.append(identifier)\n\n def build_selectbox(self, clearsetting=False, assertplayers=True):\n self.itemlist, self.actions = [], []\n if clearsetting:\n self.itemlist.append(xbmcgui.ListItem(xbmc.getLocalizedString(13403))) # Clear Default\n if self.is_local:\n self.append_playeraction(u'{0} {1}'.format(self.addon.getLocalizedString(32061), 'Kodi'), self.is_local, identifier='play_kodi')\n for i in sorted(self.play_movie, key=lambda x: x[1]):\n self.build_playeraction(i[0], 'play_movie', assertplayers=assertplayers)\n for i in sorted(self.search_movie, key=lambda x: x[1]):\n self.build_playeraction(i[0], 'search_movie', assertplayers=assertplayers)\n for i in sorted(self.play_episode, key=lambda x: x[1]):\n self.build_playeraction(i[0], 'play_episode', assertplayers=assertplayers)\n for i in sorted(self.search_episode, key=lambda x: x[1]):\n self.build_playeraction(i[0], 'search_episode', assertplayers=assertplayers)\n\n def localfile(self, file):\n if not file:\n return\n if file.endswith('.strm'):\n f = xbmcvfs.File(file)\n contents = f.read()\n f.close()\n if contents.startswith('plugin://plugin.video.themoviedb.helper'):\n return\n return file\n\n def localmovie(self):\n fuzzy_match = self.addon.getSettingBool('fuzzymatch_movie')\n return self.localfile(KodiLibrary(dbtype='movie').get_info('file', fuzzy_match=fuzzy_match, **self.item))\n\n def localepisode(self):\n fuzzy_match = self.addon.getSettingBool('fuzzymatch_tv')\n fuzzy_match = True # TODO: Get tvshow year to match against but for now force fuzzy match\n dbid = KodiLibrary(dbtype='tvshow').get_info('dbid', fuzzy_match=fuzzy_match, **self.item)\n return self.localfile(KodiLibrary(dbtype='episode', tvshowid=dbid).get_info('file', season=self.season, episode=self.episode))\n","repo_name":"cbec-dev/plugin.video.themoviedb.helper","sub_path":"resources/lib/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":20050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"26097327560","text":"from typing import Optional, TYPE_CHECKING\nimport zulip\nfrom running.plugin.runbms import RunbmsPlugin\nfrom running.util import Moma, register, MomaReservationStatus, config_index_to_chr\nimport logging\nimport copy\nfrom running.suite import is_dry_run\nfrom running.command.runbms import hfac_str\nfrom datetime import datetime, timedelta\nif TYPE_CHECKING:\n from running.benchmark import Benchmark\n\nRESERVATION_WARNING_HOURS = 12\nRESERVATION_WARNING_THRESHOLD = timedelta(\n seconds=RESERVATION_WARNING_HOURS * 60 * 60)\n\n\n@register(RunbmsPlugin)\nclass Zulip(RunbmsPlugin):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.config_file = kwargs.get(\"config_file\", \"~/.zuliprc\")\n self.client = zulip.Client(config_file=self.config_file)\n self.request = kwargs.get(\"request\", {})\n if type(self.request) is not dict:\n raise TypeError(\"request of Zulip must be a dictionary\")\n if self.request.get(\"type\") not in [\"private\", \"stream\"]:\n raise ValueError(\"Request type must be either private or stream\")\n if self.request.get(\"type\") == \"stream\" and \"topic\" not in self.request:\n raise KeyError(\"Stream messages must have a topic\")\n if \"to\" not in self.request:\n raise KeyError(\"Request must have a to field\")\n self.nop = is_dry_run()\n self.moma = Moma()\n self.last_message_id = None\n self.last_message_content = None\n\n def send_message(self, content):\n message_data = copy.deepcopy(self.request)\n message_data[\"content\"] = \"{}\\n{}{}\\n\".format(\n self.run_id,\n self.get_reservation_message(),\n content\n )\n try:\n result = self.client.send_message(message_data=message_data)\n if result[\"result\"] != \"success\":\n logging.warning(\"Zulip send_message failed\\n{}\".format(result))\n else:\n self.last_message_id = result[\"id\"]\n self.last_message_content = message_data[\"content\"]\n except:\n logging.exception(\"Unhandled Zulip send_message exception\")\n\n def modify_message(self, content):\n request = {\n \"message_id\": self.last_message_id,\n \"content\": content,\n }\n try:\n result = self.client.update_message(request)\n if result[\"result\"] != \"success\":\n logging.warning(\n \"Zulip update_message failed\\n{}\".format(result))\n else:\n self.last_message_content = content\n except:\n logging.exception(\"Unhandled Zulip update_message exception\")\n\n def __str__(self) -> str:\n return \"Zulip {}\".format(self.name)\n\n def start_hfac(self, hfac: Optional[float]):\n if self.nop:\n return\n self.send_message(\"hfac {} started\".format(\n hfac_str(hfac) if hfac is not None else \"None\"))\n\n def end_hfac(self, hfac: Optional[float]):\n if self.nop:\n return\n self.send_message(\"hfac {} ended\".format(\n hfac_str(hfac) if hfac is not None else \"None\"))\n\n def start_benchmark(self, _hfac: Optional[float], _size: Optional[int], bm: \"Benchmark\"):\n if self.nop:\n return\n self.send_message(\"benchmark {} started\".format(bm.name))\n\n def end_benchmark(self, _hfac: Optional[float], _size: Optional[int], bm: \"Benchmark\"):\n if self.nop:\n return\n self.send_message(\"benchmark {} ended\".format(bm.name))\n\n def start_invocation(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", invocation: int):\n if self.nop:\n return\n if self.last_message_id and self.last_message_content:\n self.modify_message(self.last_message_content + str(invocation))\n\n def end_invocation(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", _invocation: int):\n if self.nop:\n 
return\n\n def start_config(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", _invocation: int, _config: str, _config_index: int):\n if self.nop:\n return\n\n def end_config(self, _hfac: Optional[float], _size: Optional[int], _bm: \"Benchmark\", _invocation: int, _config: str, config_index: int, passed: bool):\n if self.nop:\n return\n if self.last_message_id and self.last_message_content:\n if passed:\n self.modify_message(self.last_message_content +\n config_index_to_chr(config_index))\n else:\n self.modify_message(self.last_message_content + \".\")\n\n def get_reservation_message(self) -> str:\n reservation = self.moma.get_reservation()\n if reservation is None:\n return \"\"\n if reservation.status is MomaReservationStatus.NOT_MOMA:\n return \"# ** Warning: not running on a moma machine. **\\n\"\n elif reservation.status is MomaReservationStatus.NOT_RESERVED:\n return \"# ** Warning: machine not reserved. **\\n\"\n elif reservation.status is MomaReservationStatus.RESERVED_BY_OTHERS:\n return \"# ** Warning: machine reserved by {}, ends at {}. **\\n\".format(\n reservation.user,\n reservation.end\n )\n elif reservation.status is MomaReservationStatus.RESERVED_BY_ME:\n assert reservation.end is not None\n delta = reservation.end - datetime.now()\n if delta < RESERVATION_WARNING_THRESHOLD:\n return \"# ** Warning: less than {} hours of reservation left. Current reservation ends at {}. **\\n\".format(\n RESERVATION_WARNING_HOURS,\n reservation.end\n )\n else:\n return \"\"\n else:\n raise ValueError(\"Unhandled reservation status value\")\n","repo_name":"anupli/running-ng","sub_path":"src/running/plugin/runbms/zulip.py","file_name":"zulip.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"77"}
+{"seq_id":"5940867039","text":"import argparse\nimport os\nimport subprocess\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description='reads')\nparser.add_argument('--ind', help=\"ind to run this on\")\nargs = parser.parse_args()\nind = args.ind\n\n\nseq = '/scratch/lsa_flux/baizm/reference_genome/AloPal_combined.a.lines.fasta'\nread_dir = '/scratch/lsa_flux/baizm/Lane_1/cortes/flash_out/'\nout_dir = '/scratch/lsa_flux/baizm/alignments/'\n\n\ndef align_seq_pe(ind, out_dir, read_dir, seq):\n\tr1 = '%s%s.notCombined_1.fastq.gz' % (read_dir, ind)\n\tr2 = '%s%s.notCombined_2.fastq.gz' % (read_dir, ind)\n\t\n\tout1 = '%s%s_pe.sam' % (out_dir, ind)\n\tout2 = '%s%s_pe.mateFixed.bam' % (out_dir, ind)\n\tout3 = '%s%s_pe.mateFixed.sorted' % (out_dir, ind)\n\n\t# align\n\tsubprocess.call(\"bwa mem -t 20 %s %s %s > %s\" % (seq, r1, r2, out1), shell=True)\n\t# fixmate\n\tsubprocess.call(\"samtools fixmate %s %s\" % (out1, out2), shell=True)\n\t# sorted\n\tsubprocess.call(\"samtools sort %s %s\" % (out2, out3), shell=True)\n\t\n\t\ndef align_seq_ext(ind, out_dir, read_dir, seq):\n\tex = '%s%s.extendedFrags.fastq.gz' % (read_dir, ind)\n\n\tout1 = '%s%s_ext.sam' % (out_dir, ind)\n\tout2 = '%s%s_ext.bam' % (out_dir, ind)\n\tout3 = '%s%s_ext.sorted' % (out_dir, ind)\n\t\n\t# align\n\tsubprocess.call(\"bwa mem -t 20 %s %s > %s\" % (seq, ex, out1), shell=True)\n\t# bam\n\tsubprocess.call(\"samtools view -uS %s > %s\" % (out1, out2), shell=True)\n\t# sorted\n\tsubprocess.call(\"samtools sort %s %s\" % (out2, out3), shell=True)\n\n\ndef merge_and_rg(ind, out_dir, read_dir, seq):\n\n\tout1 = '%s%s_pe.mateFixed.sorted.bam' % (out_dir, ind)\n\tout2 = '%s%s_ext.sorted.bam' % (out_dir, ind)\n\tout3 = '%s%s_all.bam' % (out_dir, ind)\n\tout4 = '%s%s_all.rg.bam' % (out_dir, ind)\n\tintervals = '%s%s_all.intervals' % (out_dir, ind)\n\tout5 = '%s%s_all.realigned.bam' % (out_dir, ind)\n\tout6 = '%s%s_all.bwamem.unique.bam' % (out_dir, ind)\n\n\n\t# merge bam files for alignments of paired extended reads \n\tsubprocess.call(\"samtools merge %s %s %s\" % (out3, out1, out2), shell=True) \n\t# readgroup\n\tsubprocess.call(\"java -jar ./picard.jar AddOrReplaceReadGroups INPUT=%s OUTPUT=%s RGLB=%s RGPL=Illumina RGPU=%s RGSM=%s\" % (out3, out4, ind, ind, ind), shell=True)\n\tsubprocess.call(\"samtools index %s\" % out4, shell=True)\n\t\n\n\n# align all the way until time to call SNPs\nalign_seq_pe(ind, out_dir, read_dir, seq)\nalign_seq_ext(ind, out_dir, read_dir, seq)\nmerge_and_rg(ind, out_dir, read_dir, seq)\n","repo_name":"baizm/snp-calling","sub_path":"align_reads.py","file_name":"align_reads.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"77"}
+{"seq_id":"39235338718","text":"from django.shortcuts import render\nfrom sigirexperiments import performancemeasure\nfrom ERtasks.models import Cora_labeled,Cora\nfrom sigirexperiments import models\nfrom register.models import WorkLog, WorkerInfo\nfrom pyweb import dxaconstants\nimport json\nimport dedupe\nimport os\nfrom django.http import HttpResponse\nfrom baselinealloy.castgather import searchmh,findtail\nfrom baselinealloy import headcast\nfrom baselinealloy import models as alloymodels\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom sigirexperiments import recordsampling,featuresampling,clustering,processhelper,dextrapreclustering,patternRecommendation,hbf,trainingGenerator\nfrom django.db.models import Q\n\ntraining_file = 'cora_records_randsampling80_training.json'\nsettings_file = 'cora_records_randsampling80_learned_settings'\n\nworkeroperatiionNum = 10\n#samplemth = dxaconstants.RecordSamplingMethod.UncertainSampling\n#samplemth = dxaconstants.RecordSamplingMethod.SearchSampling\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIG\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIGPattern\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingBasic\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRADIDSamplingBasic\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRADIDSamplingIGPattern\n# samplemth = dxaconstants.RecordSamplingMethod.DEXTRADIDIGPatternHBFsimple4\n# samplemth = dxaconstants.RecordSamplingMethod.hbfaffinegapOnlyclusterSimple\n# samplemth = dxaconstants.RecordSamplingMethod.hbfClusterViewSimpleMinhash\n# samplemth = dxaconstants.RecordSamplingMethod.hbfClusterViewSimpleMinhashaffinegap\nsamplemth = dxaconstants.RecordSamplingMethod.hbfClusterViewComplexMinhashaffinegap\n# samplemth = 'attributeClassifiers'\ntaskc = dxaconstants.ERTASK.Cora\nattrExplorationDisplayNum = 3\npattern_siblingDisplayNum = 3\ncontent_DisplayNum = 10\nattrBufferPoolSize = 15\nrecordBufferPoolSize = 200\nentity_view_threshold = 2\n\nexpriment_result_path = 'expriment_result.json'\notherInfo = 'hbf'\nclustering_result_path = 'expriment_clsuterresult.json'\n\nDID_flag = False\n# if samplemth == dxaconstants.RecordSamplingMethod.DEXTRADIDSamplingBasic:\n# DID_flag = True\ndataset = Cora_labeled.objects.order_by('?')\nif taskc == dxaconstants.ERTASK.Cora:\n if samplemth == dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIG or samplemth == dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingIGPattern or samplemth == dxaconstants.RecordSamplingMethod.DEXTRARandomSamplingBasic:\n dataset = Cora_labeled.objects.order_by('?')\n elif DID_flag:\n coradataset = Cora_labeled.objects.order_by('?') #init\n clustdict = dextrapreclustering.minhashPreClustering(coradataset)\n BPid = processhelper.fulfillRecordBufferPool(clustdict=clustdict,BP_size=recordBufferPoolSize)\n # BP = [coradataset.get(id=item) for item in BPid]\n # dataset = recordsampling.DIDsamplingInit(dataset=coradataset,BF=BP,beta=1,clustdict=clustdict)\n # dataset = dataset.order_by('orderscore').reverse()\n # dataset = recordsampling.DIDsamplingLittleInit(dataset=coradataset,beta=1,clustdict=clustdict)\n dataset = coradataset\n\n\n\n\n\ndef exploreperformance(request):\n\n # nmi = record_uncertainsampling_temp(request)\n # return render(request,'sigir/exploreperformance.html',{'RecordSamplingMethod':dxaconstants.RecordSamplingMethod.UncertainSampling,'workerOperationNum':workeroperatiionNum,'data':nmi})\n # dict = 
record_uncertainsampling_multimeasure(request,workeroperatiionNum)\n # # aa(request)\n\n # #record_common_cluster_vs_opnum(username=username)\n # record_common_cluster_measure(username=username)\n # headcast.test('1,p,a',2,9)\n # headcast.searchtfidf(seedid=885,kw='sympos,1993,tolerant',num=9)\n username = request.session['username']\n #recordsampling.DIDsampling()\n # featuresampling.IG()\n data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n # outputpath = 'E:\\experiment_temp\\dextra_randomsampling_IG_Pattern\\_trainingdata\\DEXTRARandomSamplingIGPattern_op10_data.json'\n # clustering.dextra_attrbute_hcluster(samplingMethod=samplemth,workeropNum=10,outputpath=outputpath,userid=data[0],username=username)\n # performancemeasure.record_common_cluster_measure(samplemth=samplemth, list=[10])\n # currentattr = models.sigirSynonymsSeedTemp.objects.filter(user=username)[0].cattr\n # dict = patternRecommendation.synonymsForCurrentAttr(data=dataset,currentAttr=currentattr)\n # for k, v in dict.items():\n # print(k, v)\n # print(data[0])\n\n\n\n ahbf = hbf.constructHBFfordataset(data[0])\n print(json.dumps(ahbf))\n hbf.printHBF(ahbf)\n store_hbf_path = '_'.join([taskc.name,samplemth.name,str(workeroperatiionNum),otherInfo,'op.json'])\n fw = open(store_hbf_path, 'w', encoding='utf-8')\n json.dump(ahbf, fw, ensure_ascii=False, indent=4)\n orderhbf = hbf.constructOrderedHBF(ahbf=ahbf,dataset=dataset)\n orderhbf = hbf.computeEdges(orderhbf)\n ordered_layers_dict = orderhbf.get_orderlayers_dict()\n print(json.dumps(ordered_layers_dict))\n # hbf.printOrderedHBF(orderhbf)\n # hbf.printOrderedHBFByOrder(orderhbf)\n hbf.printOrderedHBFByOrderOnly(orderhbf)\n sum_dict = hbf.estimateN_ACR(orderhbf)\n attris = models.sigirCoraAttr.objects.filter(userid=data[0])\n values = models.sigirCoraAttrValue.objects.filter(attr_id__in=[ attr.id for attr in attris])\n syns = models.sigirCoraValueSynonym.objects.filter(value_id__in=[ value.id for value in values])\n experimentmsg = {'workerName':username,'worderid':data[0],'task':taskc.name,'method':samplemth.name,'opNum':workeroperatiionNum,'hbfstoredPath':store_hbf_path,'acr_measure':sum_dict,'U_size':attris.count(),'V_size':values.count(),'D_size':syns.count()}\n f = open(expriment_result_path,'a')\n json.dump(experimentmsg, f, ensure_ascii=False, indent=4)\n f.write('\\n')\n f.close()\n print(json.dumps(experimentmsg))\n cluster_dict = hbf.hbfClusterViewComplexMinhashaffinegap(corahbf=orderhbf,dataset=dataset,acr_threshold=0.3,username=username,dis_threshold=3)\n for k,v in cluster_dict.items():\n models.CoraPerformanceLog.objects.create(\n explorationMethod=samplemth, clusterid=v,\n cora_id=k, confidence=-1, workerOperationNum=workeroperatiionNum)\n clusterresultdict = performancemeasure.record_sampling_clustermultimeasure_vs_opnum(wn=workeroperatiionNum,samplemth=samplemth)\n ff = open(clustering_result_path, 'a')\n json.dump(clusterresultdict, ff, ensure_ascii=False, indent=4)\n ff.write('\\n')\n ff.close()\n\n\n # traingdata_path = '_'.join(['training',taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.train'])\n # testingdata_path = '_'.join(['testing', taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.test'])\n # validationdata_path = '_'.join(['validation', taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.dev'])\n # traingdata, testingdata, validationdata = trainingGenerator.basicTrainingDataDenseGenerator(username=username)\n # 
processhelper.writeTraingTest(traingdata_path,traingdata)\n # processhelper.writeTraingTest(testingdata_path,testingdata)\n # processhelper.writeTraingTest(validationdata_path, validationdata)\n\n # recommendation_from_classifiers = processhelper.preocessPredictedResult(r'C:\\Users\\sayarara\\Desktop\\experiment_classifiers\\basic_dense\\predicted.txt')\n # filtered_dict = processhelper.filterPredictedResult(recommendation_from_classifiers,userid=data[0])\n # for k,v in filtered_dict.items():\n # print(k,len(v))\n # print(v)\n # f = open('filtered_recommendation_from_basic_classifiers','a')\n # json.dump(filtered_dict, f, ensure_ascii=False, indent=4)\n # attras = models.sigirCoraAttr.objects.filter(userid=data[0])\n # for attra in attras:\n # traingdata, testingdata, validationdata = trainingGenerator.AttributeTrainingDataGenerator(username=username,attr_id=attra.id)\n # traingdata_path = '_'.join([attra.attrname,taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.train'])\n # testingdata_path = '_'.join([attra.attrname, taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.test'])\n # validationdata_path = '_'.join([attra.attrname, taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, '.dev'])\n # processhelper.writeTraingTest(traingdata_path,traingdata)\n # processhelper.writeTraingTest(testingdata_path,testingdata)\n # processhelper.writeTraingTest(validationdata_path, validationdata)\n # allennlp_test_path = '_'.join([attra.attrname, taskc.name, samplemth.name, str(workeroperatiionNum), otherInfo, 'json.test'])\n # processhelper.writeAllenNLPTestFormat(allennlptestpath=allennlp_test_path,originalpath=testingdata_path)\n\n\n\n # predicted_names_list = ['predicted_author.txt', 'predicted_confshortname.txt', 'predicted_period.txt',\n # 'predicted_press.txt', 'predicted_year.txt', 'predicted_pages.txt', 'predicted_place.txt']\n # dir = r'C:\\Users\\sayarara\\Desktop\\experiment_classifiers\\attributes_clean\\predict'\n # d = []\n # synos = models.sigirCoraValueSynonym.objects.filter(userid=data[0])\n # syns_list = [syn.synonym for syn in synos]\n # for name in predicted_names_list:\n # path = dir + '\\\\' + name\n # print(path)\n # dict,t = processhelper.aaa(path=path)\n # d.append(dict['NULL'])\n # t = set(t).difference(set(syns_list))\n # print(t)\n # print(len(t))\n # nn = list(set(dict['NULL']).difference(set(syns_list)))\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_related_'+name, t)\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_not_related_' + name, nn)\n # print(d)\n #\n # inter = d[0]\n # for i in range(1, len(d)):\n # inter = set(inter).intersection(set(d[i]))\n # print(inter)\n # inter = list(set(inter).difference(set(syns_list)))\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_not_related_inter', inter)\n #\n # union = d[0]\n # for i in range(1, len(d)):\n # union = set(union).union(set(d[i]))\n # print(union)\n # union = list(set(union).difference(set(syns_list)))\n # processhelper.writeTraingTest(dir + '\\\\' + 'filtered_not_related_union', union)\n\n # aa = patternRecommendation.synsUsingWordNet('eighth')\n # print(aa)\n\n\n\n\n\n # processhelper.test(dataset=dataset,username=username,userid=15)\n # testa(username=username)\n # processhelper.simpleAutoMatchDection()\n #performancemeasure.record_sampling_clustermultimeasure_vs_opnum(wn=10,samplemth=samplemth)\n\n # processhelper.testre(str='156')\n # testb()\n # cora = Cora_labeled.objects.all()\n # # 
featuresampling.minHashFastClusterIG(clusternum=100,cora=cora)\n    # data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n    # processhelper.sos(attrsynonym='oxford',taskc=taskc,samplemth=samplemth,user=data[0])\n\n\n\n\n\n    dict = {}\n    return render(request, 'sigir/exploreperformance.html',\n                  dict)\n\ndeduper = 0\ntemp_d = {}\n\n\n\ndef baselinededupeinit():\n    cora = Cora_labeled.objects.all()\n    # c_r = {}\n    # temp_d = dict((item.id, item.text) for item in cora)\n    # count = 1\n    for item in cora:\n        # c_r['id']=item.id\n        # c_r['text'] = item.text\n        # clean_row = dict([('text',item.text),('id',item.id)])\n        clean_row = dict([('text', item.text), ('id', str(item.id))])\n        temp_d[item.id] = dict(clean_row)\n    fields = [{'field': 'text', 'type': 'Text'}]\n    # Create a new deduper object and pass our data model to it.\n    deduper = dedupe.Dedupe(fields)\n    deduper.sample(temp_d)\n\ndef testb():\n    cora = Cora_labeled.objects.all()\n    for item in cora:\n        item.cleantext = processhelper.simpledatacleaning(item.text)\n        item.save()\n    print(\"done\")\n\n\ndef testa(username):\n    cora = Cora_labeled.objects.all()\n    # c_r = {}\n    # temp_d = dict((item.id, item.text) for item in cora)\n    # count = 1\n    for item in cora:\n        # c_r['id']=item.id\n        # c_r['text'] = item.text\n        # clean_row = dict([('text',item.text),('id',item.id)])\n        clean_row = dict([('text', item.text), ('id', str(item.id))])\n        temp_d[item.id] = dict(clean_row)\n    fields = [{'field': 'text', 'type': 'Text'}]\n    # Create a new deduper object and pass our data model to it.\n    deduper = dedupe.Dedupe(fields)\n    deduper.sample(temp_d)\n    clustering.testnosamplingclustering(deduper=deduper,username=username,samplingMethod=dxaconstants.RecordSamplingMethod.UncertainSampling,workeropNum=20,temp_d=temp_d)\n# Create your views here.\n\ndef entityview(request):\n    username = request.session['username']\n    # f = open('_'.join([username, str(entity_view_threshold),'entityview.json']), encoding='utf-8')\n    # entitydict = json.load(f)\n    f = open('_'.join([username, str(entity_view_threshold),'entityview.json']), \"r\")  # open the saved entity-view file\n    strr = f.read()  # read the whole file into a string\n    f.close()  # close the file\n    import ast\n    # the file was written with str(entitydict), so parse it back with\n    # ast.literal_eval rather than eval to avoid executing arbitrary code\n    entitydict = ast.literal_eval(strr)\n    dict = {}\n    for k,v in entitydict.items():\n        print(k)\n        if v:\n            print(v)\n        else:\n            print('NULL')\n\n        # kstr = []\n        # for i in range(entity_view_threshold):\n        #     aa = processhelper.viewhelperSummary(attrname=k[0][i],value=k[1][i])\n        #     kstr.append(aa)\n        coras = Cora.objects.filter(id__in=v)\n        # dict[' '.join(kstr)] = coras\n        dict[k] = coras\n\n    return render(request,'sigir/entityview.html',{'entities':dict,'entity_view_threshold':entity_view_threshold})\n\n\ndef exploration(request):\n    username = request.session['username']\n    print(username)\n    data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n    # if DID_flag:\n    #     # dataset = recordsampling.DIDsamplingInit(dataset=coradataset, BF=coradataset, beta=1,\n    #     #                                             clustdict=clustdict)\n    #     # dataset = recordsampling.DIDsampling(dataset=coradataset, BF=coradataset, username=username, userid=data[0],\n    #     #                                         attra_id=attrid, beta=1, clustdict=clustdict)\n    #     # dataset = recordsampling.DIDsamplingNoAttr(dataset=coradataset,BF=BP,username=username,userid=data[0],clustdict=clustdict)\n    #     dataset = recordsampling.DIDsamplingNoAttrLittle(dataset=coradataset,BF=BPid,username=username,userid=data[0],clustdict=clustdict,beta=1)\n    #     dataset = dataset.order_by('orderscore').reverse()\n    # if request.method == 'POST':\n    #     attribute_editor = request.POST.get(\"attribute_editor\")\n    #     attrbute_create = request.POST.get(\"attribute_create\")\n    #     print(attrbute_create)\n    #     multis = request.POST.getlist(\"IG\")\n    #     print(multis)\n    #     submittype = request.POST.get(\"submit\")\n    #     print(submittype)\n    #     if submittype == \"create and bind\":\n    #         # pseedid = request.POST.get(\"pseedid\")\n    #         print(\"create and bind\")\n    #     list_sort_value_desc = featuresampling.IG()\n\n    if request.is_ajax():\n        print(request.body)\n        print(request.POST)\n        searchkey = request.POST.get('searchkey')\n        page = request.POST.get('page')\n        print(searchkey)\n        # cora_list = Cora_labeled.objects.filter(text__contains=searchkey)\n        cora_list = dataset.filter(text__contains=searchkey)\n        print(cora_list)\n        paginator = Paginator(cora_list, content_DisplayNum)\n        try:\n            cora = paginator.page(page)\n        except PageNotAnInteger:\n            # first page\n            cora = paginator.page(1)\n        except EmptyPage:\n            # last page\n            cora = paginator.page(paginator.num_pages)\n        response = HttpResponse()\n        response['Content-Type'] = \"text/javascript\"\n        # a Paginator Page object is not JSON-serializable, so dump plain\n        # fields; 'total' reports the full result count for the client pager\n        rows = [{'id': item.id, 'text': item.text} for item in cora]\n        response.write(json.dumps({'rows': rows, 'total': paginator.count}))\n        return response\n    else:\n        searchkey = request.GET.get('q',\"\")\n        print(searchkey)\n        if searchkey:\n            # cora_list = Cora_labeled.objects.filter(text__icontains=searchkey)\n            # request.session['q'] = searchkey\n            cora_list = dataset.filter(text__contains=searchkey)\n        else:\n            # searchkey = request.session['q']\n            # if searchkey:\n            #     cora_list = Cora.objects.filter(text__contains=searchkey)\n            # else:\n            #     cora_list = Cora.objects.all\n            # cora_list = Cora_labeled.objects.all()\n            cora_list = dataset\n        print(searchkey)\n        paginator = Paginator(cora_list,content_DisplayNum)  # show content_DisplayNum records per page\n        page = request.GET.get('page')\n        print(page)\n        attrpage = request.GET.get('attrpage')\n        print(attrpage)\n        # attrpaginator = Paginator(list_sort_value_desc, 10)\n        if attrpage:\n            attrpage = int(attrpage)\n        else:\n            attrpage = 1\n\n        try:\n            cora = paginator.page(page)\n        except PageNotAnInteger:\n            # first page\n            cora = paginator.page(1)\n        except EmptyPage:\n            # last page\n            cora = paginator.page(paginator.num_pages)\n        # try:\n        #     attrig = attrpaginator.page(attrpage)\n        # except PageNotAnInteger:\n        #     # first page\n        #     attrig = attrpaginator.page(1)\n        # except EmptyPage:\n        #     # last page\n        #     attrig = attrpaginator.page(attrpaginator.num_pages)\n        # coraa = Cora_labeled.objects.all()\n        # list_sort_value_desc = featuresampling.minHashFastClusterIG(clusternum=100, cora=coraa)\n        # print(list_sort_value_desc[0:10])\n        # return render(request, 'sigir/exploration.html',{'data': cora,'searchkey':searchkey,'attrIG':list_sort_value_desc[0+attrExplorationDisplayNum*(attrpage-1):attrExplorationDisplayNum*attrpage]})\n\n        if not models.sigirAttrExploration.objects.filter(user=username):\n            # init the substrings with information gain\n            featuresampling.minHashFastClusterIG(clusternum=100,cora=dataset,username=username)\n        attrexplo = models.sigirAttrExploration.objects.filter(user=username,is_labelled=False).order_by('orderscore').reverse()\n        a = [ item.substring for item in attrexplo]\n        print(a)\n        matches = []\n        seeds = models.patternSeedTemp.objects.filter(user=username)\n        attrname = ''\n        if seeds:\n\n            seedslist = processhelper.str2list(seedstr=seeds[0].seedsubstring, deli='###')\n            print(seedslist)\n            syn = models.sigirCoraValueSynonym.objects.filter(synonym=seedslist[0],userid=data[0])[0]\n            attrname = syn.value.attr.attrname\n            attrid = syn.value.attr_id\n            # if DID_flag:\n            #     # dataset = recordsampling.DIDsamplingInit(dataset=coradataset, BF=coradataset, beta=1,\n            #
# # dataset = recordsampling.DIDsampling(dataset=coradataset,BF=BP,username=username,userid=data[0],attra_id=attrid,beta=1,clustdict=clustdict)\n # dataset = recordsampling.DIDsamplingLittle(dataset=coradataset,BF=BPid,username=username,userid=data[0],attra_id=attrid,beta=1,clustdict=clustdict)\n # dataset = dataset.order_by('orderscore').reverse()\n values = models.sigirCoraAttrValue.objects.filter(attr_id=attrid, userid=data[0])\n osysn = models.sigirCoraValueSynonym.objects.filter(value_id__in=[item.id for item in values])\n match = patternRecommendation.findCandidateSilbings(seedslist=seedslist, data=dataset)\n matches = list(set(match).difference(set([item.synonym for item in osysn])))[0:pattern_siblingDisplayNum]\n\n stadvalname = ''\n valsyns = []\n curentattrias = models.sigirSynonymsSeedTemp.objects.filter(user=username)\n if curentattrias:\n currentattr = curentattrias[0].cattr\n dict = patternRecommendation.synonymsForCurrentAttr(data=dataset, currentAttr=currentattr)\n if len(dict) == 0:\n stadvalname = ''\n valsyns = []\n else:\n a = sorted(dict.items())[0]\n stadvalname =a[0]\n valsyns = a[1]\n\n # dataset process progress\n ahbf = hbf.construct_value_level_HBFfordataset(data[0])\n entitydict = hbf.entityView(ahbf=ahbf,entity_view_threshold=entity_view_threshold)\n entityprogress = processhelper.getDatasetProgress(entitydict=entitydict)\n # fw = open('_'.join([username, str(entity_view_threshold),'entityview.json']), 'w', encoding='utf-8')\n # json.dump(entitydict, fw, ensure_ascii=False, indent=4)\n f = open('_'.join([username, str(entity_view_threshold),'entityview.json']), 'w')\n f.write(str(entitydict))\n f.close()\n # attribute process progress\n attributeprogress = processhelper.getAttributeProgress(userid=data[0],user=username)\n print(attributeprogress)\n return render(request, 'sigir/exploration.html', {'epg':entityprogress,'ap':attributeprogress,'ap2':attributeprogress,'data': cora, 'searchkey': searchkey,'matches':matches,'attrname':attrname,'syns':valsyns,'standvalue':stadvalname,\n 'attrIG2': attrexplo[\n 0 + attrExplorationDisplayNum * (\n attrpage - 1):attrExplorationDisplayNum * attrpage]})\n\n # return render(request, 'sigir/exploration.html',\n # {'data': cora, 'searchkey': searchkey, 'attrIG': attrig})\n\n\ndef attrexploration(request):\n username = request.session['username']\n data = WorkerInfo.objects.filter(user=username).values_list('id', flat=True)\n print(data[0])\n if request.method == 'POST':\n attrbute_create = request.POST.get('attrname')\n print(attrbute_create)\n multis = request.POST.getlist(\"IG\")\n print(multis)\n msg = {\"attrname\":attrbute_create,'selectedvalues':multis}\n models.dextraitems.objects.create(task=taskc,msg=json.dumps(msg),optype=dxaconstants.WorkerOperation.createAndBind,user=data[0],samplingMethod=samplemth)\n coraattr = models.sigirCoraAttr.objects.filter(attrname=attrbute_create,userid=data[0])\n\n\n\n\n\n if not coraattr:\n coraattr = models.sigirCoraAttr(attrname=attrbute_create, attrscope='local', is_alive=1, userid=data[0])\n coraattr.save()\n else:\n coraattr = coraattr[0]\n print(coraattr.id,coraattr.attrname)\n values = models.sigirAttrExploration.objects.filter(id__in = multis)\n seeds = [ item.substring for item in values]\n ss = models.patternSeedTemp.objects.filter(user=username)\n seedsstr = processhelper.list2str(seedslist=seeds,deli='###')\n if ss:\n ss[0].seedsubstring = seedsstr\n ss[0].save()\n models.sigirSynonymsSeedTemp.objects.filter(user=username).update(cattr=coraattr)\n else:\n 
models.patternSeedTemp.objects.create(seedsubstring=seedsstr,user=username)\n models.sigirSynonymsSeedTemp.objects.create(cattr=coraattr,user=username)\n substrings = models.sigirAttrExploration.objects.filter(is_labelled=False,user=username).order_by('orderscore').reverse()[\n 0:attrBufferPoolSize]\n #decay\n for item in substrings:\n if item.id not in multis:\n item.orderscore = item.orderscore*0.8\n item.save()\n for item in values:\n print(item.substring)\n val = models.sigirCoraAttrValue(attr_id=coraattr.id, value=item.substring, userid=data[0])\n val.save()\n corasyno = models.sigirCoraValueSynonym(value=val, synonym=item.substring, userid=data[0])\n corasyno.save()\n # if len(item.substring) < 4:\n # # check boundaries\n # print(item.substring)\n # llist = Cora_labeled.objects.filter(cleantext__icontains=' '+item.substring+' ')\n # else:\n # llist = Cora_labeled.objects.filter(text__icontains=item.substring)\n # llist = Cora_labeled.objects.filter(Q(cleantext__icontains=' ' + item.substring + ' ')| Q(cleantext__istartswith=item.substring + ' ')| Q(cleantext__iendswith=' ' + item.substring))\n # llist = dataset.filter(\n # Q(cleantext__icontains=' ' + item.substring + ' ') | Q(cleantext__istartswith=item.substring + ' ') | Q(\n # cleantext__iendswith=' ' + item.substring))\n #\n #\n # # restr = ''+attra.attrname+'